[llvm] 4e4cb43 - [RISCV][MC] Enable printing of zext.b alias (#133502)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 28 12:24:00 PDT 2025
Author: Alex Bradbury
Date: 2025-03-28T19:23:56Z
New Revision: 4e4cb4359ae915b5a14c94914e39a8cc94e98963
URL: https://github.com/llvm/llvm-project/commit/4e4cb4359ae915b5a14c94914e39a8cc94e98963
DIFF: https://github.com/llvm/llvm-project/commit/4e4cb4359ae915b5a14c94914e39a8cc94e98963.diff
LOG: [RISCV][MC] Enable printing of zext.b alias (#133502)
The comment shows that, at the time, we were worried about producing the
alias in assembly that might be ingested by a binutils version that
didn't yet support it. binutils gained support over 4 years ago
<https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=c2137f55ad04e451d834048d4bfec1de2daea20e>.
With all the changes in areas such as ELF attributes, if you tried to
use LLVM's RISC-V assembler output with a binutils that old then zext.b
would be the least of your worries.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
llvm/test/CodeGen/RISCV/abdu-neg.ll
llvm/test/CodeGen/RISCV/abdu.ll
llvm/test/CodeGen/RISCV/alu8.ll
llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
llvm/test/CodeGen/RISCV/atomic-rmw.ll
llvm/test/CodeGen/RISCV/atomic-signext.ll
llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
llvm/test/CodeGen/RISCV/avgceilu.ll
llvm/test/CodeGen/RISCV/avgflooru.ll
llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
llvm/test/CodeGen/RISCV/csr-first-use-cost.ll
llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
llvm/test/CodeGen/RISCV/div-by-constant.ll
llvm/test/CodeGen/RISCV/div.ll
llvm/test/CodeGen/RISCV/double-convert.ll
llvm/test/CodeGen/RISCV/float-convert.ll
llvm/test/CodeGen/RISCV/fold-mem-offset.ll
llvm/test/CodeGen/RISCV/half-convert.ll
llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
llvm/test/CodeGen/RISCV/machine-combiner.ll
llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll
llvm/test/CodeGen/RISCV/memset-inline.ll
llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
llvm/test/CodeGen/RISCV/pr65025.ll
llvm/test/CodeGen/RISCV/rem.ll
llvm/test/CodeGen/RISCV/rv32zbkb.ll
llvm/test/CodeGen/RISCV/rv64zbkb.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
llvm/test/CodeGen/RISCV/simplify-condbr.ll
llvm/test/CodeGen/RISCV/split-store.ll
llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
llvm/test/CodeGen/RISCV/usub_sat_plus.ll
llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
llvm/test/CodeGen/RISCV/zcb-regalloc-hints.ll
llvm/test/MC/RISCV/rv32i-aliases-valid.s
llvm/test/MC/RISCV/rv64i-aliases-valid.s
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index b61992298ca95..49068780f697d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1118,10 +1118,7 @@ def : MnemonicAlias<"move", "mv">;
def : MnemonicAlias<"scall", "ecall">;
def : MnemonicAlias<"sbreak", "ebreak">;
-// This alias was added to the spec in December 2020. Don't print it by default
-// to allow assembly we print to be compatible with versions of GNU assembler
-// that don't support this alias.
-def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF), 0>;
+def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF)>;
let Predicates = [HasStdExtZicfilp] in {
def : InstAlias<"lpad $imm20", (AUIPC X0, uimm20:$imm20)>;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
index f1c0fccb78a36..1632f92e96b50 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -49,15 +49,15 @@ entry:
define i32 @add_i8_zeroext_i32(i8 %a, i8 %b) {
; RV32IM-LABEL: add_i8_zeroext_i32:
; RV32IM: # %bb.0: # %entry
-; RV32IM-NEXT: andi a0, a0, 255
-; RV32IM-NEXT: andi a1, a1, 255
+; RV32IM-NEXT: zext.b a0, a0
+; RV32IM-NEXT: zext.b a1, a1
; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64IM-LABEL: add_i8_zeroext_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: andi a0, a0, 255
-; RV64IM-NEXT: andi a1, a1, 255
+; RV64IM-NEXT: zext.b a0, a0
+; RV64IM-NEXT: zext.b a1, a1
; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
index f62902cdd14d9..9c46e6792e8d8 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
@@ -227,7 +227,7 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
define i8 @udiv8_constant_no_add(i8 %a) nounwind {
; RV32-LABEL: udiv8_constant_no_add:
; RV32: # %bb.0:
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: li a1, 205
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: srli a0, a0, 10
@@ -235,7 +235,7 @@ define i8 @udiv8_constant_no_add(i8 %a) nounwind {
;
; RV64-LABEL: udiv8_constant_no_add:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: li a1, 205
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srli a0, a0, 10
@@ -248,28 +248,28 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
; RV32-LABEL: udiv8_constant_add:
; RV32: # %bb.0:
; RV32-NEXT: li a1, 37
-; RV32-NEXT: andi a2, a0, 255
+; RV32-NEXT: zext.b a2, a0
; RV32-NEXT: mul a1, a2, a1
; RV32-NEXT: srli a1, a1, 8
; RV32-NEXT: sub a0, a0, a1
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: srli a0, a0, 1
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: srli a0, a0, 2
; RV32-NEXT: ret
;
; RV64-LABEL: udiv8_constant_add:
; RV64: # %bb.0:
; RV64-NEXT: li a1, 37
-; RV64-NEXT: andi a2, a0, 255
+; RV64-NEXT: zext.b a2, a0
; RV64-NEXT: mul a1, a2, a1
; RV64-NEXT: srli a1, a1, 8
; RV64-NEXT: subw a0, a0, a1
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: srli a0, a0, 1
; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: srli a0, a0, 2
; RV64-NEXT: ret
%1 = udiv i8 %a, 7
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
index aeed219d99555..a49e94f4bc910 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-convert.ll
@@ -855,7 +855,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_wu_s_i8:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
-; CHECKIFD-NEXT: andi a0, a0, 255
+; CHECKIFD-NEXT: zext.b a0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_s_i8:
@@ -863,7 +863,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -873,7 +873,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
index 1820ecf3b5056..fa093623dd6f8 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-convert.ll
@@ -790,7 +790,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_i8:
; CHECKIF: # %bb.0:
; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
-; CHECKIF-NEXT: andi a0, a0, 255
+; CHECKIF-NEXT: zext.b a0, a0
; CHECKIF-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_s_i8:
@@ -798,7 +798,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunssfsi
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -808,7 +808,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunssfsi
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
index e29c450c26cb4..ae9b6cc8948f5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll
@@ -469,7 +469,7 @@ define fp128 @uitofp_i8(i8 %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: call __floatunsitf
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
index ababec16f7f8f..da95481a5e588 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
@@ -327,7 +327,7 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
define i8 @srli_i8(i8 %a) nounwind {
; CHECK-LABEL: srli_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: srli a0, a0, 6
; CHECK-NEXT: ret
%1 = lshr i8 %a, 6
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
index b214cf68ddce8..55cb95413ae24 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbkb.ll
@@ -110,7 +110,7 @@ define i32 @packh_i32(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: packh_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a2, 16
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: addi a2, a2, -256
; CHECK-NEXT: slli a1, a1, 8
; CHECK-NEXT: and a1, a1, a2
@@ -126,8 +126,8 @@ define i32 @packh_i32(i32 %a, i32 %b) nounwind {
define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: packh_i32_2:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: ret
@@ -148,7 +148,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: packh_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 16
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: addi a1, a1, -256
; CHECK-NEXT: slli a2, a2, 8
; CHECK-NEXT: and a1, a2, a1
@@ -166,8 +166,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: packh_i64_2:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a2, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a2
; RV32I-NEXT: slli a2, a1, 8
; RV32I-NEXT: srli a1, a1, 24
; RV32I-NEXT: or a0, a2, a0
@@ -175,7 +175,7 @@ define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
;
; RV32ZBKB-LABEL: packh_i64_2:
; RV32ZBKB: # %bb.0:
-; RV32ZBKB-NEXT: andi a1, a2, 255
+; RV32ZBKB-NEXT: zext.b a1, a2
; RV32ZBKB-NEXT: srli a1, a1, 24
; RV32ZBKB-NEXT: packh a0, a0, a2
; RV32ZBKB-NEXT: ret
@@ -210,7 +210,7 @@ define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
; RV32I-LABEL: packh_i16_2:
; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: slli a0, a0, 8
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: ret
@@ -232,7 +232,7 @@ define void @packh_i16_3(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) {
; RV32I-LABEL: packh_i16_3:
; RV32I: # %bb.0:
; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: slli a0, a0, 8
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: sh a0, 0(a3)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
index 79d08772e8853..1eddb8fc2797e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
@@ -405,7 +405,7 @@ define i64 @rori_i64_fshr(i64 %a) nounwind {
define i8 @srli_i8(i8 %a) nounwind {
; CHECK-LABEL: srli_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: srli a0, a0, 6
; CHECK-NEXT: ret
%1 = lshr i8 %a, 6
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
index 558424b53be95..f413abffcdccc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll
@@ -140,7 +140,7 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: packh_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: addiw a2, a2, -256
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: and a1, a1, a2
@@ -150,7 +150,7 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64ZBKB-LABEL: packh_i32:
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: lui a2, 16
-; RV64ZBKB-NEXT: andi a0, a0, 255
+; RV64ZBKB-NEXT: zext.b a0, a0
; RV64ZBKB-NEXT: addiw a2, a2, -256
; RV64ZBKB-NEXT: slli a1, a1, 8
; RV64ZBKB-NEXT: and a1, a1, a2
@@ -166,8 +166,8 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: packh_i32_2:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
@@ -188,7 +188,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: packh_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a2, 16
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: addiw a2, a2, -256
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: and a1, a1, a2
@@ -198,7 +198,7 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64ZBKB-LABEL: packh_i64:
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: lui a2, 16
-; RV64ZBKB-NEXT: andi a0, a0, 255
+; RV64ZBKB-NEXT: zext.b a0, a0
; RV64ZBKB-NEXT: addiw a2, a2, -256
; RV64ZBKB-NEXT: slli a1, a1, 8
; RV64ZBKB-NEXT: and a1, a1, a2
@@ -214,8 +214,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: packh_i64_2:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
@@ -253,7 +253,7 @@ define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
; RV64I-LABEL: packh_i16_2:
; RV64I: # %bb.0:
; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: ret
@@ -275,7 +275,7 @@ define void @packh_i16_3(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) {
; RV64I-LABEL: packh_i16_3:
; RV64I: # %bb.0:
; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: sh a0, 0(a3)
diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll
index 9e41cde7ae181..9fa142ee2aa1e 100644
--- a/llvm/test/CodeGen/RISCV/abdu-neg.ll
+++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll
@@ -11,8 +11,8 @@
define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -21,8 +21,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_ext_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -31,8 +31,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
;
; RV32ZBB-LABEL: abd_ext_i8:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: andi a1, a1, 255
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a1, a1
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: maxu a2, a0, a1
; RV32ZBB-NEXT: minu a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
@@ -40,8 +40,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
;
; RV64ZBB-LABEL: abd_ext_i8:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
-; RV64ZBB-NEXT: andi a1, a1, 255
+; RV64ZBB-NEXT: zext.b a0, a0
+; RV64ZBB-NEXT: zext.b a1, a1
; RV64ZBB-NEXT: sub a0, a0, a1
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
@@ -60,7 +60,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: srli a1, a1, 16
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -69,7 +69,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
;
; RV64I-LABEL: abd_ext_i8_i16:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: sub a0, a0, a1
@@ -81,7 +81,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; RV32ZBB-LABEL: abd_ext_i8_i16:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: zext.h a1, a1
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: maxu a2, a0, a1
; RV32ZBB-NEXT: minu a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
@@ -89,7 +89,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
;
; RV64ZBB-LABEL: abd_ext_i8_i16:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
+; RV64ZBB-NEXT: zext.b a0, a0
; RV64ZBB-NEXT: zext.h a1, a1
; RV64ZBB-NEXT: sub a0, a0, a1
; RV64ZBB-NEXT: neg a1, a0
@@ -107,8 +107,8 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -117,8 +117,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_ext_i8_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -127,8 +127,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
;
; RV32ZBB-LABEL: abd_ext_i8_undef:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: andi a1, a1, 255
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a1, a1
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: maxu a2, a0, a1
; RV32ZBB-NEXT: minu a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
@@ -136,8 +136,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
;
; RV64ZBB-LABEL: abd_ext_i8_undef:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
-; RV64ZBB-NEXT: andi a1, a1, 255
+; RV64ZBB-NEXT: zext.b a0, a0
+; RV64ZBB-NEXT: zext.b a1, a1
; RV64ZBB-NEXT: sub a0, a0, a1
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
@@ -1094,8 +1094,8 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
; NOZBB-LABEL: abd_minmax_i8:
; NOZBB: # %bb.0:
-; NOZBB-NEXT: andi a1, a1, 255
-; NOZBB-NEXT: andi a0, a0, 255
+; NOZBB-NEXT: zext.b a1, a1
+; NOZBB-NEXT: zext.b a0, a0
; NOZBB-NEXT: mv a2, a0
; NOZBB-NEXT: bgeu a0, a1, .LBB13_3
; NOZBB-NEXT: # %bb.1:
@@ -1112,8 +1112,8 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_minmax_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a2, a0
@@ -1599,8 +1599,8 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
; NOZBB-LABEL: abd_cmp_i8:
; NOZBB: # %bb.0:
-; NOZBB-NEXT: andi a2, a0, 255
-; NOZBB-NEXT: andi a3, a1, 255
+; NOZBB-NEXT: zext.b a2, a0
+; NOZBB-NEXT: zext.b a3, a1
; NOZBB-NEXT: bgeu a3, a2, .LBB18_2
; NOZBB-NEXT: # %bb.1:
; NOZBB-NEXT: sub a0, a1, a0
@@ -1611,8 +1611,8 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_cmp_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a2, a0, 255
-; ZBB-NEXT: andi a3, a1, 255
+; ZBB-NEXT: zext.b a2, a0
+; ZBB-NEXT: zext.b a3, a1
; ZBB-NEXT: bgeu a3, a2, .LBB18_2
; ZBB-NEXT: # %bb.1:
; ZBB-NEXT: sub a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll
index 7c8638cb461e2..614d9c20ac574 100644
--- a/llvm/test/CodeGen/RISCV/abdu.ll
+++ b/llvm/test/CodeGen/RISCV/abdu.ll
@@ -11,8 +11,8 @@
define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -21,8 +21,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_ext_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -31,8 +31,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_ext_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
@@ -50,7 +50,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: srli a1, a1, 16
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -61,7 +61,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srli a1, a1, 48
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -71,7 +71,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; ZBB-LABEL: abd_ext_i8_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: zext.h a1, a1
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
@@ -87,8 +87,8 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -97,8 +97,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_ext_i8_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -107,8 +107,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_ext_i8_undef:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
@@ -944,8 +944,8 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_minmax_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -954,8 +954,8 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_minmax_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -964,8 +964,8 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_minmax_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
@@ -1333,8 +1333,8 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_cmp_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -1343,8 +1343,8 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_cmp_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -1353,8 +1353,8 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_cmp_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
@@ -1727,8 +1727,8 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_select_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
@@ -1737,8 +1737,8 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
;
; RV64I-LABEL: abd_select_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -1747,8 +1747,8 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
;
; ZBB-LABEL: abd_select_i8:
; ZBB: # %bb.0:
-; ZBB-NEXT: andi a1, a1, 255
-; ZBB-NEXT: andi a0, a0, 255
+; ZBB-NEXT: zext.b a1, a1
+; ZBB-NEXT: zext.b a0, a0
; ZBB-NEXT: minu a2, a0, a1
; ZBB-NEXT: maxu a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/alu8.ll b/llvm/test/CodeGen/RISCV/alu8.ll
index 6ae96e7c9deae..6ae2a7fa017ce 100644
--- a/llvm/test/CodeGen/RISCV/alu8.ll
+++ b/llvm/test/CodeGen/RISCV/alu8.ll
@@ -44,13 +44,13 @@ define i8 @slti(i8 %a) nounwind {
define i8 @sltiu(i8 %a) nounwind {
; RV32I-LABEL: sltiu:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sltiu a0, a0, 3
; RV32I-NEXT: ret
;
; RV64I-LABEL: sltiu:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sltiu a0, a0, 3
; RV64I-NEXT: ret
%1 = icmp ult i8 %a, 3
@@ -274,15 +274,15 @@ define i8 @slt(i8 %a, i8 %b) nounwind {
define i8 @sltu(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: sltu:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: sltu:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: sltu a0, a0, a1
; RV64I-NEXT: ret
%1 = icmp ult i8 %a, %b
@@ -307,13 +307,13 @@ define i8 @xor(i8 %a, i8 %b) nounwind {
define i8 @srl(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: srl:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: srl a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: srl:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: ret
%1 = lshr i8 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
index 741860db13957..d427b4435d37d 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
@@ -99,8 +99,8 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a4, a0, 3
; RV32IA-NEXT: li a0, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a0, a0, a4
; RV32IA-NEXT: sll a1, a1, a4
; RV32IA-NEXT: sll a2, a2, a4
@@ -129,8 +129,8 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-ZACAS-NEXT: slli a4, a0, 3
; RV32IA-ZACAS-NEXT: li a0, 255
-; RV32IA-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-ZACAS-NEXT: zext.b a2, a2
; RV32IA-ZACAS-NEXT: sll a0, a0, a4
; RV32IA-ZACAS-NEXT: sll a1, a1, a4
; RV32IA-ZACAS-NEXT: sll a2, a2, a4
@@ -159,8 +159,8 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-NEXT: andi a3, a0, -4
; RV64IA-NEXT: slli a4, a0, 3
; RV64IA-NEXT: li a0, 255
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: zext.b a1, a1
+; RV64IA-NEXT: zext.b a2, a2
; RV64IA-NEXT: sllw a0, a0, a4
; RV64IA-NEXT: sllw a1, a1, a4
; RV64IA-NEXT: sllw a2, a2, a4
@@ -189,8 +189,8 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a4, a0, 3
; RV64IA-ZACAS-NEXT: li a0, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a0, a0, a4
; RV64IA-ZACAS-NEXT: sllw a1, a1, a4
; RV64IA-ZACAS-NEXT: sllw a2, a2, a4
@@ -240,8 +240,8 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a4, a0, 3
; RV32IA-NEXT: li a0, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a0, a0, a4
; RV32IA-NEXT: sll a1, a1, a4
; RV32IA-NEXT: sll a2, a2, a4
@@ -273,8 +273,8 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-ZACAS-NEXT: slli a4, a0, 3
; RV32IA-ZACAS-NEXT: li a0, 255
-; RV32IA-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-ZACAS-NEXT: zext.b a2, a2
; RV32IA-ZACAS-NEXT: sll a0, a0, a4
; RV32IA-ZACAS-NEXT: sll a1, a1, a4
; RV32IA-ZACAS-NEXT: sll a2, a2, a4
@@ -306,8 +306,8 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-NEXT: andi a3, a0, -4
; RV64IA-NEXT: slli a4, a0, 3
; RV64IA-NEXT: li a0, 255
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: zext.b a1, a1
+; RV64IA-NEXT: zext.b a2, a2
; RV64IA-NEXT: sllw a0, a0, a4
; RV64IA-NEXT: sllw a1, a1, a4
; RV64IA-NEXT: sllw a2, a2, a4
@@ -339,8 +339,8 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a4, a0, 3
; RV64IA-ZACAS-NEXT: li a0, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a0, a0, a4
; RV64IA-ZACAS-NEXT: sllw a1, a1, a4
; RV64IA-ZACAS-NEXT: sllw a2, a2, a4
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index c3b972840377f..12f057ce4ccd3 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -43,8 +43,8 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a0, a2, a0
@@ -79,8 +79,8 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -102,8 +102,8 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a4, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: sllw a0, a2, a0
@@ -130,8 +130,8 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -170,8 +170,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -193,8 +193,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -216,8 +216,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -239,8 +239,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -275,8 +275,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -298,8 +298,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -326,8 +326,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -349,8 +349,8 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -394,8 +394,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -417,8 +417,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -440,8 +440,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -463,8 +463,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -499,8 +499,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -522,8 +522,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -550,8 +550,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -573,8 +573,8 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -618,8 +618,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -641,8 +641,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -664,8 +664,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -687,8 +687,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -723,8 +723,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -746,8 +746,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -774,8 +774,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -797,8 +797,8 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -842,8 +842,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -865,8 +865,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -888,8 +888,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -911,8 +911,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -947,8 +947,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -970,8 +970,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -998,8 +998,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -1021,8 +1021,8 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -1066,8 +1066,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -1089,8 +1089,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -1112,8 +1112,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -1135,8 +1135,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -1171,8 +1171,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -1194,8 +1194,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -1222,8 +1222,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -1245,8 +1245,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -1290,8 +1290,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-NEXT: andi a3, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a4, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
-; RV32IA-WMO-NEXT: andi a2, a2, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
+; RV32IA-WMO-NEXT: zext.b a2, a2
; RV32IA-WMO-NEXT: sll a4, a4, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: sll a0, a2, a0
@@ -1313,8 +1313,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-WMO-ZACAS-NEXT: li a4, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-WMO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-WMO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-WMO-ZACAS-NEXT: sll a0, a2, a0
@@ -1336,8 +1336,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-NEXT: andi a3, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a4, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
-; RV32IA-TSO-NEXT: andi a2, a2, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
+; RV32IA-TSO-NEXT: zext.b a2, a2
; RV32IA-TSO-NEXT: sll a4, a4, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: sll a0, a2, a0
@@ -1359,8 +1359,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV32IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV32IA-TSO-ZACAS-NEXT: li a4, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV32IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV32IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV32IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV32IA-TSO-ZACAS-NEXT: sll a4, a4, a0
; RV32IA-TSO-ZACAS-NEXT: sll a1, a1, a0
; RV32IA-TSO-ZACAS-NEXT: sll a0, a2, a0
@@ -1395,8 +1395,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -1418,8 +1418,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a4, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-WMO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-WMO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a0, a2, a0
@@ -1446,8 +1446,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -1469,8 +1469,8 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a4, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-TSO-ZACAS-NEXT: zext.b a2, a2
; RV64IA-TSO-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a0, a2, a0
@@ -1514,8 +1514,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a0, a2, a0
@@ -1550,8 +1550,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -1573,8 +1573,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a4, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: sllw a0, a2, a0
@@ -1601,8 +1601,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -1646,8 +1646,8 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a0, a2, a0
@@ -1682,8 +1682,8 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -1705,8 +1705,8 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a4, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: sllw a0, a2, a0
@@ -1733,8 +1733,8 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
@@ -1778,8 +1778,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a0, a2, a0
@@ -1814,8 +1814,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-WMO-NEXT: andi a3, a0, -4
; RV64IA-WMO-NEXT: slli a0, a0, 3
; RV64IA-WMO-NEXT: li a4, 255
-; RV64IA-WMO-NEXT: andi a1, a1, 255
-; RV64IA-WMO-NEXT: andi a2, a2, 255
+; RV64IA-WMO-NEXT: zext.b a1, a1
+; RV64IA-WMO-NEXT: zext.b a2, a2
; RV64IA-WMO-NEXT: sllw a4, a4, a0
; RV64IA-WMO-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NEXT: sllw a0, a2, a0
@@ -1837,8 +1837,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-ZACAS-NEXT: andi a3, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a4, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
-; RV64IA-ZACAS-NEXT: andi a2, a2, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
+; RV64IA-ZACAS-NEXT: zext.b a2, a2
; RV64IA-ZACAS-NEXT: sllw a4, a4, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: sllw a0, a2, a0
@@ -1866,8 +1866,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-TSO-NEXT: andi a3, a0, -4
; RV64IA-TSO-NEXT: slli a0, a0, 3
; RV64IA-TSO-NEXT: li a4, 255
-; RV64IA-TSO-NEXT: andi a1, a1, 255
-; RV64IA-TSO-NEXT: andi a2, a2, 255
+; RV64IA-TSO-NEXT: zext.b a1, a1
+; RV64IA-TSO-NEXT: zext.b a2, a2
; RV64IA-TSO-NEXT: sllw a4, a4, a0
; RV64IA-TSO-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NEXT: sllw a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 81518541477a8..1e5acd2575b88 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -46,7 +46,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
@@ -76,7 +76,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
@@ -96,7 +96,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
@@ -140,7 +140,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -160,7 +160,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -190,7 +190,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -210,7 +210,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -230,7 +230,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -250,7 +250,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
@@ -294,7 +294,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -314,7 +314,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -344,7 +344,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -364,7 +364,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -384,7 +384,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -404,7 +404,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
@@ -448,7 +448,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -468,7 +468,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -498,7 +498,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -518,7 +518,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -538,7 +538,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -558,7 +558,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -602,7 +602,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
@@ -632,7 +632,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
@@ -652,7 +652,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
@@ -1636,7 +1636,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
@@ -1666,7 +1666,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
@@ -1686,7 +1686,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
@@ -1730,7 +1730,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1750,7 +1750,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1780,7 +1780,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1800,7 +1800,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1820,7 +1820,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1840,7 +1840,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
@@ -1884,7 +1884,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -1904,7 +1904,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -1934,7 +1934,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -1954,7 +1954,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -1974,7 +1974,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -1994,7 +1994,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
@@ -2038,7 +2038,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2058,7 +2058,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2088,7 +2088,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2108,7 +2108,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2128,7 +2128,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2148,7 +2148,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
@@ -2192,7 +2192,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
@@ -2222,7 +2222,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
@@ -2242,7 +2242,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
@@ -2286,7 +2286,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
@@ -2316,7 +2316,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
@@ -2336,7 +2336,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
@@ -2382,7 +2382,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2402,7 +2402,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2432,7 +2432,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2452,7 +2452,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2472,7 +2472,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2492,7 +2492,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
@@ -2538,7 +2538,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2558,7 +2558,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2588,7 +2588,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2608,7 +2608,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2628,7 +2628,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2648,7 +2648,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
@@ -2694,7 +2694,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2714,7 +2714,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2744,7 +2744,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2764,7 +2764,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2784,7 +2784,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2804,7 +2804,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
@@ -2850,7 +2850,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
@@ -2880,7 +2880,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
@@ -2900,7 +2900,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
@@ -2946,7 +2946,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: not a3, a3
; RV32IA-NEXT: sll a1, a1, a0
@@ -2970,7 +2970,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: not a3, a3
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
@@ -2984,7 +2984,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: not a3, a3
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
@@ -3022,7 +3022,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: not a3, a3
; RV32IA-WMO-NEXT: sll a1, a1, a0
@@ -3036,7 +3036,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: not a3, a3
; RV32IA-TSO-NEXT: sll a1, a1, a0
@@ -3060,7 +3060,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: not a3, a3
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3074,7 +3074,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: not a3, a3
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3088,7 +3088,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: not a3, a3
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3102,7 +3102,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: not a3, a3
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3140,7 +3140,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: not a3, a3
; RV32IA-WMO-NEXT: sll a1, a1, a0
@@ -3154,7 +3154,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: not a3, a3
; RV32IA-TSO-NEXT: sll a1, a1, a0
@@ -3178,7 +3178,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: not a3, a3
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3192,7 +3192,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: not a3, a3
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3206,7 +3206,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: not a3, a3
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3220,7 +3220,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: not a3, a3
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3258,7 +3258,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: not a3, a3
; RV32IA-WMO-NEXT: sll a1, a1, a0
@@ -3272,7 +3272,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: not a3, a3
; RV32IA-TSO-NEXT: sll a1, a1, a0
@@ -3296,7 +3296,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: not a3, a3
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3310,7 +3310,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: not a3, a3
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3324,7 +3324,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: not a3, a3
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3338,7 +3338,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: not a3, a3
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3376,7 +3376,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: not a3, a3
; RV32IA-WMO-NEXT: sll a1, a1, a0
@@ -3390,7 +3390,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: not a3, a3
; RV32IA-TSO-NEXT: sll a1, a1, a0
@@ -3414,7 +3414,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: not a3, a3
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3428,7 +3428,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: not a3, a3
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
@@ -3442,7 +3442,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: not a3, a3
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3456,7 +3456,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: not a3, a3
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
@@ -3494,7 +3494,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
@@ -3525,7 +3525,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
@@ -3546,7 +3546,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
@@ -3567,7 +3567,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
@@ -3588,7 +3588,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
@@ -3653,7 +3653,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3674,7 +3674,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3705,7 +3705,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3726,7 +3726,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3747,7 +3747,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3768,7 +3768,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3789,7 +3789,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3810,7 +3810,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
@@ -3875,7 +3875,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -3896,7 +3896,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -3927,7 +3927,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -3948,7 +3948,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -3969,7 +3969,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -3990,7 +3990,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -4011,7 +4011,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -4032,7 +4032,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
@@ -4097,7 +4097,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4118,7 +4118,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4149,7 +4149,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4170,7 +4170,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4191,7 +4191,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4212,7 +4212,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4233,7 +4233,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4254,7 +4254,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
@@ -4319,7 +4319,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
@@ -4350,7 +4350,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
@@ -4371,7 +4371,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
@@ -4392,7 +4392,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZABHA-NOZACAS-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
@@ -4413,7 +4413,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZABHA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZABHA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZABHA-NOZACAS-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
@@ -4479,7 +4479,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
@@ -4499,7 +4499,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4509,7 +4509,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS: # %bb.0:
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-ZACAS-NEXT: srlw a0, a1, a0
@@ -4543,7 +4543,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoor.w.aq a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -4553,7 +4553,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -4573,7 +4573,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoor.w.aq a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4583,7 +4583,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4593,7 +4593,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoor.w.aq a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4603,7 +4603,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4637,7 +4637,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoor.w.rl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -4647,7 +4647,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -4667,7 +4667,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoor.w.rl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4677,7 +4677,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4687,7 +4687,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoor.w.rl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4697,7 +4697,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4731,7 +4731,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -4741,7 +4741,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -4761,7 +4761,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4771,7 +4771,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4781,7 +4781,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4791,7 +4791,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4825,7 +4825,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -4835,7 +4835,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -4855,7 +4855,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4865,7 +4865,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4875,7 +4875,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4885,7 +4885,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -4919,7 +4919,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
@@ -4939,7 +4939,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-NOZACAS-NEXT: srlw a0, a1, a0
@@ -4949,7 +4949,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS: # %bb.0:
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-ZACAS-NEXT: srlw a0, a1, a0
@@ -4983,7 +4983,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoxor.w.aq a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -4993,7 +4993,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -5013,7 +5013,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoxor.w.aq a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5023,7 +5023,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5033,7 +5033,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoxor.w.aq a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5043,7 +5043,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5077,7 +5077,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoxor.w.rl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -5087,7 +5087,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -5107,7 +5107,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoxor.w.rl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5117,7 +5117,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5127,7 +5127,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoxor.w.rl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5137,7 +5137,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5171,7 +5171,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -5181,7 +5181,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -5201,7 +5201,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5211,7 +5211,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5221,7 +5221,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5231,7 +5231,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5265,7 +5265,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV32IA-WMO-NEXT: srl a0, a1, a0
@@ -5275,7 +5275,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-TSO-NEXT: srl a0, a1, a0
@@ -5295,7 +5295,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5305,7 +5305,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS: # %bb.0:
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-NOZACAS-NEXT: srlw a0, a1, a0
@@ -5315,7 +5315,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS: # %bb.0:
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: amoxor.w.aqrl a1, a1, (a2)
; RV64IA-WMO-ZACAS-NEXT: srlw a0, a1, a0
@@ -5325,7 +5325,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS: # %bb.0:
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-TSO-ZACAS-NEXT: srlw a0, a1, a0
@@ -7727,7 +7727,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB55_2
; RV32I-NEXT: .LBB55_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB55_2 Depth=1
@@ -7741,7 +7741,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB55_4
; RV32I-NEXT: .LBB55_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB55_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -7762,7 +7762,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
@@ -7791,7 +7791,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB55_2
; RV64I-NEXT: .LBB55_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB55_2 Depth=1
@@ -7805,7 +7805,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB55_4
; RV64I-NEXT: .LBB55_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB55_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -7826,7 +7826,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
@@ -7850,7 +7850,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
@@ -7893,7 +7893,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB56_2
; RV32I-NEXT: .LBB56_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB56_2 Depth=1
@@ -7907,7 +7907,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB56_4
; RV32I-NEXT: .LBB56_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB56_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -7928,7 +7928,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -7952,7 +7952,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -7981,7 +7981,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB56_2
; RV64I-NEXT: .LBB56_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB56_2 Depth=1
@@ -7995,7 +7995,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB56_4
; RV64I-NEXT: .LBB56_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB56_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8016,7 +8016,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -8040,7 +8040,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -8064,7 +8064,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -8088,7 +8088,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
@@ -8131,7 +8131,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB57_2
; RV32I-NEXT: .LBB57_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB57_2 Depth=1
@@ -8145,7 +8145,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB57_4
; RV32I-NEXT: .LBB57_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB57_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8166,7 +8166,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8190,7 +8190,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8219,7 +8219,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB57_2
; RV64I-NEXT: .LBB57_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB57_2 Depth=1
@@ -8233,7 +8233,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB57_4
; RV64I-NEXT: .LBB57_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB57_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8254,7 +8254,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8278,7 +8278,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8302,7 +8302,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8326,7 +8326,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -8369,7 +8369,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB58_2
; RV32I-NEXT: .LBB58_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB58_2 Depth=1
@@ -8383,7 +8383,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB58_4
; RV32I-NEXT: .LBB58_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB58_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8404,7 +8404,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8428,7 +8428,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8457,7 +8457,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB58_2
; RV64I-NEXT: .LBB58_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB58_2 Depth=1
@@ -8471,7 +8471,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB58_4
; RV64I-NEXT: .LBB58_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB58_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8492,7 +8492,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8516,7 +8516,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8540,7 +8540,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8564,7 +8564,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
@@ -8607,7 +8607,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB59_2
; RV32I-NEXT: .LBB59_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB59_2 Depth=1
@@ -8621,7 +8621,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB59_4
; RV32I-NEXT: .LBB59_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB59_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8642,7 +8642,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
@@ -8671,7 +8671,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB59_2
; RV64I-NEXT: .LBB59_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB59_2 Depth=1
@@ -8685,7 +8685,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB59_4
; RV64I-NEXT: .LBB59_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB59_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8706,7 +8706,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
@@ -8730,7 +8730,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
@@ -8773,7 +8773,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB60_2
; RV32I-NEXT: .LBB60_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB60_2 Depth=1
@@ -8787,7 +8787,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB60_4
; RV32I-NEXT: .LBB60_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB60_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8808,7 +8808,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
@@ -8837,7 +8837,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB60_2
; RV64I-NEXT: .LBB60_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB60_2 Depth=1
@@ -8851,7 +8851,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB60_4
; RV64I-NEXT: .LBB60_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB60_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8872,7 +8872,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
@@ -8896,7 +8896,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
@@ -8939,7 +8939,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB61_2
; RV32I-NEXT: .LBB61_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB61_2 Depth=1
@@ -8953,7 +8953,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB61_4
; RV32I-NEXT: .LBB61_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB61_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -8974,7 +8974,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -8998,7 +8998,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -9027,7 +9027,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB61_2
; RV64I-NEXT: .LBB61_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB61_2 Depth=1
@@ -9041,7 +9041,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB61_4
; RV64I-NEXT: .LBB61_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB61_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9062,7 +9062,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -9086,7 +9086,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -9110,7 +9110,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -9134,7 +9134,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -9177,7 +9177,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB62_2
; RV32I-NEXT: .LBB62_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB62_2 Depth=1
@@ -9191,7 +9191,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB62_4
; RV32I-NEXT: .LBB62_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB62_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9212,7 +9212,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9236,7 +9236,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9265,7 +9265,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB62_2
; RV64I-NEXT: .LBB62_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB62_2 Depth=1
@@ -9279,7 +9279,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB62_4
; RV64I-NEXT: .LBB62_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB62_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9300,7 +9300,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9324,7 +9324,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9348,7 +9348,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9372,7 +9372,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
@@ -9415,7 +9415,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB63_2
; RV32I-NEXT: .LBB63_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB63_2 Depth=1
@@ -9429,7 +9429,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB63_4
; RV32I-NEXT: .LBB63_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB63_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9450,7 +9450,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-WMO-NEXT: andi a2, a0, -4
; RV32IA-WMO-NEXT: slli a0, a0, 3
; RV32IA-WMO-NEXT: li a3, 255
-; RV32IA-WMO-NEXT: andi a1, a1, 255
+; RV32IA-WMO-NEXT: zext.b a1, a1
; RV32IA-WMO-NEXT: sll a3, a3, a0
; RV32IA-WMO-NEXT: sll a1, a1, a0
; RV32IA-WMO-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9474,7 +9474,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32IA-TSO-NEXT: andi a2, a0, -4
; RV32IA-TSO-NEXT: slli a0, a0, 3
; RV32IA-TSO-NEXT: li a3, 255
-; RV32IA-TSO-NEXT: andi a1, a1, 255
+; RV32IA-TSO-NEXT: zext.b a1, a1
; RV32IA-TSO-NEXT: sll a3, a3, a0
; RV32IA-TSO-NEXT: sll a1, a1, a0
; RV32IA-TSO-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9503,7 +9503,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB63_2
; RV64I-NEXT: .LBB63_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB63_2 Depth=1
@@ -9517,7 +9517,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB63_4
; RV64I-NEXT: .LBB63_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB63_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9538,7 +9538,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-NOZACAS-NEXT: li a3, 255
-; RV64IA-WMO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-NOZACAS-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9562,7 +9562,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-NOZACAS-NEXT: li a3, 255
-; RV64IA-TSO-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-NOZACAS-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9586,7 +9586,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-WMO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-WMO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-WMO-ZACAS-NEXT: li a3, 255
-; RV64IA-WMO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-WMO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-WMO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-WMO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-WMO-ZACAS-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9610,7 +9610,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64IA-TSO-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-TSO-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-TSO-ZACAS-NEXT: li a3, 255
-; RV64IA-TSO-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-TSO-ZACAS-NEXT: zext.b a1, a1
; RV64IA-TSO-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-TSO-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-TSO-ZACAS-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
@@ -9653,7 +9653,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB64_2
; RV32I-NEXT: .LBB64_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB64_2 Depth=1
@@ -9667,7 +9667,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB64_4
; RV32I-NEXT: .LBB64_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB64_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9688,7 +9688,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
@@ -9717,7 +9717,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB64_2
; RV64I-NEXT: .LBB64_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB64_2 Depth=1
@@ -9731,7 +9731,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB64_4
; RV64I-NEXT: .LBB64_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB64_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -9752,7 +9752,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
; RV64IA-NOZACAS-NEXT: slli a0, a0, 3
; RV64IA-NOZACAS-NEXT: li a3, 255
-; RV64IA-NOZACAS-NEXT: andi a1, a1, 255
+; RV64IA-NOZACAS-NEXT: zext.b a1, a1
; RV64IA-NOZACAS-NEXT: sllw a3, a3, a0
; RV64IA-NOZACAS-NEXT: sllw a1, a1, a0
; RV64IA-NOZACAS-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
@@ -9776,7 +9776,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-ZACAS-NEXT: andi a2, a0, -4
; RV64IA-ZACAS-NEXT: slli a0, a0, 3
; RV64IA-ZACAS-NEXT: li a3, 255
-; RV64IA-ZACAS-NEXT: andi a1, a1, 255
+; RV64IA-ZACAS-NEXT: zext.b a1, a1
; RV64IA-ZACAS-NEXT: sllw a3, a3, a0
; RV64IA-ZACAS-NEXT: sllw a1, a1, a0
; RV64IA-ZACAS-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index aea7473ceece4..b9702e9fe0fc2 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -142,7 +142,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -176,7 +176,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
@@ -214,7 +214,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
@@ -248,7 +248,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
@@ -286,7 +286,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
@@ -320,7 +320,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
@@ -358,7 +358,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: not a3, a3
; RV32IA-NEXT: sll a1, a1, a0
@@ -386,7 +386,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: not a3, a3
; RV64IA-NEXT: sllw a1, a1, a0
@@ -418,7 +418,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
@@ -453,7 +453,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
@@ -491,7 +491,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
@@ -515,7 +515,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
@@ -543,7 +543,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA: # %bb.0:
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: amoxor.w a1, a1, (a2)
; RV32IA-NEXT: srl a0, a1, a0
@@ -567,7 +567,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: amoxor.w a1, a1, (a2)
; RV64IA-NEXT: srlw a0, a1, a0
@@ -893,7 +893,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB12_2
; RV32I-NEXT: .LBB12_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB12_2 Depth=1
@@ -907,7 +907,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB12_4
; RV32I-NEXT: .LBB12_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bltu s2, a0, .LBB12_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -929,7 +929,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
@@ -960,7 +960,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB12_2
; RV64I-NEXT: .LBB12_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB12_2 Depth=1
@@ -974,7 +974,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB12_4
; RV64I-NEXT: .LBB12_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bltu s2, a0, .LBB12_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -996,7 +996,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
@@ -1031,7 +1031,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB13_2
; RV32I-NEXT: .LBB13_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB13_2 Depth=1
@@ -1045,7 +1045,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: bnez a0, .LBB13_4
; RV32I-NEXT: .LBB13_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bgeu s2, a0, .LBB13_1
; RV32I-NEXT: # %bb.3: # %atomicrmw.start
@@ -1067,7 +1067,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: andi a2, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a3, 255
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: sll a3, a3, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
@@ -1098,7 +1098,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB13_2
; RV64I-NEXT: .LBB13_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB13_2 Depth=1
@@ -1112,7 +1112,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: bnez a0, .LBB13_4
; RV64I-NEXT: .LBB13_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bgeu s2, a0, .LBB13_1
; RV64I-NEXT: # %bb.3: # %atomicrmw.start
@@ -1134,7 +1134,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: andi a2, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a3, 255
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: sllw a3, a3, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
@@ -3806,8 +3806,8 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a2, a2, a0
@@ -3846,8 +3846,8 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV64IA-NEXT: andi a3, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a4, 255
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: zext.b a1, a1
+; RV64IA-NEXT: zext.b a2, a2
; RV64IA-NEXT: sllw a4, a4, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: sllw a2, a2, a0
@@ -3890,8 +3890,8 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV32IA-NEXT: andi a3, a0, -4
; RV32IA-NEXT: slli a0, a0, 3
; RV32IA-NEXT: li a4, 255
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: andi a2, a2, 255
+; RV32IA-NEXT: zext.b a1, a1
+; RV32IA-NEXT: zext.b a2, a2
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: sll a1, a1, a0
; RV32IA-NEXT: sll a0, a2, a0
@@ -3929,8 +3929,8 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV64IA-NEXT: andi a3, a0, -4
; RV64IA-NEXT: slli a0, a0, 3
; RV64IA-NEXT: li a4, 255
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: andi a2, a2, 255
+; RV64IA-NEXT: zext.b a1, a1
+; RV64IA-NEXT: zext.b a2, a2
; RV64IA-NEXT: sllw a4, a4, a0
; RV64IA-NEXT: sllw a1, a1, a0
; RV64IA-NEXT: sllw a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
index 34b29ea1dc6c2..2db6f80f4fd61 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll
@@ -29,10 +29,10 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: .LBB0_1: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: sltu a0, a0, s2
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -68,18 +68,18 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: lw a5, 0(a2)
; RV32IA-NEXT: sll a3, a4, a3
; RV32IA-NEXT: not a3, a3
-; RV32IA-NEXT: andi a4, a1, 255
+; RV32IA-NEXT: zext.b a4, a1
; RV32IA-NEXT: .LBB0_1: # %atomicrmw.start
; RV32IA-NEXT: # =>This Loop Header: Depth=1
; RV32IA-NEXT: # Child Loop BB0_3 Depth 2
; RV32IA-NEXT: mv a6, a5
; RV32IA-NEXT: srl a5, a5, a0
-; RV32IA-NEXT: andi a7, a5, 255
+; RV32IA-NEXT: zext.b a7, a5
; RV32IA-NEXT: sltu a7, a7, a4
; RV32IA-NEXT: addi a7, a7, -1
; RV32IA-NEXT: and a7, a7, a1
; RV32IA-NEXT: sub a5, a5, a7
-; RV32IA-NEXT: andi a5, a5, 255
+; RV32IA-NEXT: zext.b a5, a5
; RV32IA-NEXT: sll a5, a5, a0
; RV32IA-NEXT: and a7, a6, a3
; RV32IA-NEXT: or a7, a7, a5
@@ -112,10 +112,10 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: .LBB0_1: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: sltu a0, a0, s2
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -151,18 +151,18 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: lw a3, 0(a2)
; RV64IA-NEXT: sllw a4, a5, a4
; RV64IA-NEXT: not a4, a4
-; RV64IA-NEXT: andi a5, a1, 255
+; RV64IA-NEXT: zext.b a5, a1
; RV64IA-NEXT: .LBB0_1: # %atomicrmw.start
; RV64IA-NEXT: # =>This Loop Header: Depth=1
; RV64IA-NEXT: # Child Loop BB0_3 Depth 2
; RV64IA-NEXT: srlw a6, a3, a0
; RV64IA-NEXT: sext.w a7, a3
-; RV64IA-NEXT: andi t0, a6, 255
+; RV64IA-NEXT: zext.b t0, a6
; RV64IA-NEXT: sltu t0, t0, a5
; RV64IA-NEXT: addi t0, t0, -1
; RV64IA-NEXT: and t0, t0, a1
; RV64IA-NEXT: subw a6, a6, t0
-; RV64IA-NEXT: andi a6, a6, 255
+; RV64IA-NEXT: zext.b a6, a6
; RV64IA-NEXT: sllw a6, a6, a0
; RV64IA-NEXT: and a3, a3, a4
; RV64IA-NEXT: or a6, a3, a6
@@ -710,10 +710,10 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: andi s1, a1, 255
+; RV32I-NEXT: zext.b s1, a1
; RV32I-NEXT: .LBB4_1: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: sub a1, a0, s1
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: addi a0, a0, -1
@@ -747,13 +747,13 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: lw a4, 0(a2)
; RV32IA-NEXT: andi a0, a0, 24
; RV32IA-NEXT: not a3, a3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: .LBB4_1: # %atomicrmw.start
; RV32IA-NEXT: # =>This Loop Header: Depth=1
; RV32IA-NEXT: # Child Loop BB4_3 Depth 2
; RV32IA-NEXT: mv a5, a4
; RV32IA-NEXT: srl a4, a4, a0
-; RV32IA-NEXT: andi a4, a4, 255
+; RV32IA-NEXT: zext.b a4, a4
; RV32IA-NEXT: sub a6, a4, a1
; RV32IA-NEXT: sltu a4, a4, a6
; RV32IA-NEXT: addi a4, a4, -1
@@ -787,10 +787,10 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: andi s1, a1, 255
+; RV64I-NEXT: zext.b s1, a1
; RV64I-NEXT: .LBB4_1: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: sub a1, a0, s1
; RV64I-NEXT: sltu a0, a0, a1
; RV64I-NEXT: addi a0, a0, -1
@@ -824,13 +824,13 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: lw a3, 0(a2)
; RV64IA-NEXT: andi a0, a0, 24
; RV64IA-NEXT: not a4, a4
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: .LBB4_1: # %atomicrmw.start
; RV64IA-NEXT: # =>This Loop Header: Depth=1
; RV64IA-NEXT: # Child Loop BB4_3 Depth 2
; RV64IA-NEXT: srlw a5, a3, a0
; RV64IA-NEXT: sext.w a6, a3
-; RV64IA-NEXT: andi a5, a5, 255
+; RV64IA-NEXT: zext.b a5, a5
; RV64IA-NEXT: sub a7, a5, a1
; RV64IA-NEXT: sltu a5, a5, a7
; RV64IA-NEXT: addi a5, a5, -1
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 3ff01e4987bd5..ae1db4f1d62da 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -26,11 +26,11 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
-; RV32I-NEXT: andi s1, a1, 255
+; RV32I-NEXT: zext.b s1, a1
; RV32I-NEXT: .LBB0_1: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: addi a0, a3, 1
-; RV32I-NEXT: andi a1, a3, 255
+; RV32I-NEXT: zext.b a1, a3
; RV32I-NEXT: sltu a1, a1, s1
; RV32I-NEXT: neg a2, a1
; RV32I-NEXT: and a2, a2, a0
@@ -63,18 +63,18 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: lw a4, 0(a2)
; RV32IA-NEXT: andi a0, a0, 24
; RV32IA-NEXT: not a3, a3
-; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: zext.b a1, a1
; RV32IA-NEXT: .LBB0_1: # %atomicrmw.start
; RV32IA-NEXT: # =>This Loop Header: Depth=1
; RV32IA-NEXT: # Child Loop BB0_3 Depth 2
; RV32IA-NEXT: mv a5, a4
; RV32IA-NEXT: srl a4, a4, a0
-; RV32IA-NEXT: andi a6, a4, 255
+; RV32IA-NEXT: zext.b a6, a4
; RV32IA-NEXT: addi a4, a4, 1
; RV32IA-NEXT: sltu a6, a6, a1
; RV32IA-NEXT: neg a6, a6
; RV32IA-NEXT: and a4, a6, a4
-; RV32IA-NEXT: andi a4, a4, 255
+; RV32IA-NEXT: zext.b a4, a4
; RV32IA-NEXT: sll a4, a4, a0
; RV32IA-NEXT: and a6, a5, a3
; RV32IA-NEXT: or a6, a6, a4
@@ -104,11 +104,11 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
-; RV64I-NEXT: andi s1, a1, 255
+; RV64I-NEXT: zext.b s1, a1
; RV64I-NEXT: .LBB0_1: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: addi a0, a3, 1
-; RV64I-NEXT: andi a1, a3, 255
+; RV64I-NEXT: zext.b a1, a3
; RV64I-NEXT: sltu a1, a1, s1
; RV64I-NEXT: neg a2, a1
; RV64I-NEXT: and a2, a2, a0
@@ -141,18 +141,18 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: lw a3, 0(a2)
; RV64IA-NEXT: andi a0, a0, 24
; RV64IA-NEXT: not a4, a4
-; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: zext.b a1, a1
; RV64IA-NEXT: .LBB0_1: # %atomicrmw.start
; RV64IA-NEXT: # =>This Loop Header: Depth=1
; RV64IA-NEXT: # Child Loop BB0_3 Depth 2
; RV64IA-NEXT: srlw a5, a3, a0
; RV64IA-NEXT: sext.w a6, a3
-; RV64IA-NEXT: andi a7, a5, 255
+; RV64IA-NEXT: zext.b a7, a5
; RV64IA-NEXT: addi a5, a5, 1
; RV64IA-NEXT: sltu a7, a7, a1
; RV64IA-NEXT: negw a7, a7
; RV64IA-NEXT: and a5, a7, a5
-; RV64IA-NEXT: andi a5, a5, 255
+; RV64IA-NEXT: zext.b a5, a5
; RV64IA-NEXT: sllw a5, a5, a0
; RV64IA-NEXT: and a3, a3, a4
; RV64IA-NEXT: or a5, a3, a5
@@ -684,7 +684,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lbu a3, 0(a0)
; RV32I-NEXT: mv s1, a1
-; RV32I-NEXT: andi s2, a1, 255
+; RV32I-NEXT: zext.b s2, a1
; RV32I-NEXT: j .LBB4_2
; RV32I-NEXT: .LBB4_1: # %atomicrmw.start
; RV32I-NEXT: # in Loop: Header=BB4_2 Depth=1
@@ -698,7 +698,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: bnez a0, .LBB4_4
; RV32I-NEXT: .LBB4_2: # %atomicrmw.start
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: andi a0, a3, 255
+; RV32I-NEXT: zext.b a0, a3
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: sltu a0, s2, a0
; RV32I-NEXT: or a0, a1, a0
@@ -731,11 +731,11 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: lw a6, 0(a2)
; RV32IA-NEXT: sll a3, a4, a3
; RV32IA-NEXT: not a3, a3
-; RV32IA-NEXT: andi a4, a1, 255
+; RV32IA-NEXT: zext.b a4, a1
; RV32IA-NEXT: j .LBB4_2
; RV32IA-NEXT: .LBB4_1: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB4_2 Depth=1
-; RV32IA-NEXT: andi a6, a7, 255
+; RV32IA-NEXT: zext.b a6, a7
; RV32IA-NEXT: sll a6, a6, a0
; RV32IA-NEXT: and a7, a5, a3
; RV32IA-NEXT: or a7, a7, a6
@@ -756,7 +756,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: # Child Loop BB4_5 Depth 2
; RV32IA-NEXT: mv a5, a6
; RV32IA-NEXT: srl a6, a6, a0
-; RV32IA-NEXT: andi a7, a6, 255
+; RV32IA-NEXT: zext.b a7, a6
; RV32IA-NEXT: seqz t0, a7
; RV32IA-NEXT: sltu a7, a4, a7
; RV32IA-NEXT: or t0, t0, a7
@@ -785,7 +785,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lbu a3, 0(a0)
; RV64I-NEXT: mv s1, a1
-; RV64I-NEXT: andi s2, a1, 255
+; RV64I-NEXT: zext.b s2, a1
; RV64I-NEXT: j .LBB4_2
; RV64I-NEXT: .LBB4_1: # %atomicrmw.start
; RV64I-NEXT: # in Loop: Header=BB4_2 Depth=1
@@ -799,7 +799,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: bnez a0, .LBB4_4
; RV64I-NEXT: .LBB4_2: # %atomicrmw.start
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: andi a0, a3, 255
+; RV64I-NEXT: zext.b a0, a3
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: sltu a0, s2, a0
; RV64I-NEXT: or a0, a1, a0
@@ -832,12 +832,12 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: lw a3, 0(a2)
; RV64IA-NEXT: sllw a4, a5, a4
; RV64IA-NEXT: not a4, a4
-; RV64IA-NEXT: andi a5, a1, 255
+; RV64IA-NEXT: zext.b a5, a1
; RV64IA-NEXT: j .LBB4_2
; RV64IA-NEXT: .LBB4_1: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB4_2 Depth=1
; RV64IA-NEXT: sext.w a6, a3
-; RV64IA-NEXT: andi a7, a7, 255
+; RV64IA-NEXT: zext.b a7, a7
; RV64IA-NEXT: sllw a7, a7, a0
; RV64IA-NEXT: and a3, a3, a4
; RV64IA-NEXT: or a7, a3, a7
@@ -857,7 +857,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: # =>This Loop Header: Depth=1
; RV64IA-NEXT: # Child Loop BB4_5 Depth 2
; RV64IA-NEXT: srlw a6, a3, a0
-; RV64IA-NEXT: andi a7, a6, 255
+; RV64IA-NEXT: zext.b a7, a6
; RV64IA-NEXT: seqz t0, a7
; RV64IA-NEXT: sltu a7, a5, a7
; RV64IA-NEXT: or t0, t0, a7
diff --git a/llvm/test/CodeGen/RISCV/avgceilu.ll b/llvm/test/CodeGen/RISCV/avgceilu.ll
index 924a50a836dda..735bd19909d5f 100644
--- a/llvm/test/CodeGen/RISCV/avgceilu.ll
+++ b/llvm/test/CodeGen/RISCV/avgceilu.ll
@@ -11,8 +11,8 @@
define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
; RV32I-LABEL: test_fixed_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: addi a0, a0, 1
; RV32I-NEXT: srli a0, a0, 1
@@ -20,8 +20,8 @@ define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
;
; RV64I-LABEL: test_fixed_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: addi a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
@@ -36,8 +36,8 @@ define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
; RV32I-LABEL: test_ext_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: addi a0, a0, 1
; RV32I-NEXT: srli a0, a0, 1
@@ -45,8 +45,8 @@ define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
;
; RV64I-LABEL: test_ext_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: addi a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/avgflooru.ll b/llvm/test/CodeGen/RISCV/avgflooru.ll
index 550cc3136bbc3..8a69c9393c87a 100644
--- a/llvm/test/CodeGen/RISCV/avgflooru.ll
+++ b/llvm/test/CodeGen/RISCV/avgflooru.ll
@@ -11,16 +11,16 @@
define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
; RV32I-LABEL: test_fixed_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 1
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_fixed_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 1
; RV64I-NEXT: ret
@@ -34,16 +34,16 @@ define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
; RV32I-LABEL: test_ext_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 1
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ext_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 1
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 18916dd69eb43..8149179c6412d 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -84,7 +84,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: lw t0, 4(sp)
; RV32I-FPELIM-NEXT: lw t1, 0(sp)
-; RV32I-FPELIM-NEXT: andi a0, a0, 255
+; RV32I-FPELIM-NEXT: zext.b a0, a0
; RV32I-FPELIM-NEXT: slli a1, a1, 16
; RV32I-FPELIM-NEXT: xor a3, a3, a7
; RV32I-FPELIM-NEXT: srli a1, a1, 16
@@ -107,7 +107,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: lw t0, 4(s0)
; RV32I-WITHFP-NEXT: lw t1, 0(s0)
-; RV32I-WITHFP-NEXT: andi a0, a0, 255
+; RV32I-WITHFP-NEXT: zext.b a0, a0
; RV32I-WITHFP-NEXT: slli a1, a1, 16
; RV32I-WITHFP-NEXT: xor a3, a3, a7
; RV32I-WITHFP-NEXT: srli a1, a1, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
index e16bed5400300..807fe9e3a581e 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll
@@ -1152,7 +1152,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-FPELIM-NEXT: lw a7, 0(sp)
; ILP32E-FPELIM-NEXT: lw t0, 4(sp)
; ILP32E-FPELIM-NEXT: lw t1, 8(sp)
-; ILP32E-FPELIM-NEXT: andi a0, a0, 255
+; ILP32E-FPELIM-NEXT: zext.b a0, a0
; ILP32E-FPELIM-NEXT: slli a1, a1, 16
; ILP32E-FPELIM-NEXT: srli a1, a1, 16
; ILP32E-FPELIM-NEXT: add a0, a0, a2
@@ -1181,7 +1181,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-WITHFP-NEXT: lw a7, 0(s0)
; ILP32E-WITHFP-NEXT: lw t0, 4(s0)
; ILP32E-WITHFP-NEXT: lw t1, 8(s0)
-; ILP32E-WITHFP-NEXT: andi a0, a0, 255
+; ILP32E-WITHFP-NEXT: zext.b a0, a0
; ILP32E-WITHFP-NEXT: slli a1, a1, 16
; ILP32E-WITHFP-NEXT: srli a1, a1, 16
; ILP32E-WITHFP-NEXT: add a0, a0, a2
@@ -1209,7 +1209,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw a7, 0(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw t0, 4(sp)
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: lw t1, 8(sp)
-; ILP32E-FPELIM-SAVE-RESTORE-NEXT: andi a0, a0, 255
+; ILP32E-FPELIM-SAVE-RESTORE-NEXT: zext.b a0, a0
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: slli a1, a1, 16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: srli a1, a1, 16
; ILP32E-FPELIM-SAVE-RESTORE-NEXT: add a0, a0, a2
@@ -1236,7 +1236,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw a7, 0(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw t0, 4(s0)
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: lw t1, 8(s0)
-; ILP32E-WITHFP-SAVE-RESTORE-NEXT: andi a0, a0, 255
+; ILP32E-WITHFP-SAVE-RESTORE-NEXT: zext.b a0, a0
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: slli a1, a1, 16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: srli a1, a1, 16
; ILP32E-WITHFP-SAVE-RESTORE-NEXT: add a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 746b71a08a30b..e0f46e7484518 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -50,7 +50,7 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f,
; RV64I: # %bb.0:
; RV64I-NEXT: lw t0, 8(sp)
; RV64I-NEXT: ld t1, 0(sp)
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: xor a3, a3, a7
; RV64I-NEXT: srli a1, a1, 48
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
index 5bae6b1d7f544..ace3dc94d80f4 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
@@ -122,7 +122,7 @@ define signext i32 @ret_callresult_uint8_as_anyint32() nounwind {
define zeroext i8 @sint8_arg_to_uint8_ret(i8 signext %a) nounwind {
; RV32I-LABEL: sint8_arg_to_uint8_ret:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
ret i8 %a
}
@@ -132,7 +132,7 @@ define void @pass_sint8_as_uint8(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: call receive_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -149,7 +149,7 @@ define zeroext i8 @ret_callresult_sint8_as_uint8() nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call return_sint8
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -229,7 +229,7 @@ define signext i32 @ret_callresult_sint8_as_anyint32() nounwind {
define zeroext i8 @anyint32_arg_to_uint8_ret(i32 signext %a) nounwind {
; RV32I-LABEL: anyint32_arg_to_uint8_ret:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
%1 = trunc i32 %a to i8
ret i8 %1
@@ -240,7 +240,7 @@ define void @pass_anyint32_as_uint8(i32 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: call receive_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -258,7 +258,7 @@ define zeroext i8 @ret_callresult_anyint32_as_uint8() nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call return_anyint32
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/csr-first-use-cost.ll b/llvm/test/CodeGen/RISCV/csr-first-use-cost.ll
index c6e5bae3c3c24..7a14a6ca30961 100644
--- a/llvm/test/CodeGen/RISCV/csr-first-use-cost.ll
+++ b/llvm/test/CodeGen/RISCV/csr-first-use-cost.ll
@@ -9,7 +9,7 @@ define fastcc void @Perl_sv_setnv(i8 %c, ptr %.str.54.3682) nounwind {
; ZERO-COST-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; ZERO-COST-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; ZERO-COST-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; ZERO-COST-NEXT: andi a0, a0, 255
+; ZERO-COST-NEXT: zext.b a0, a0
; ZERO-COST-NEXT: li a2, 2
; ZERO-COST-NEXT: blt a2, a0, .LBB0_3
; ZERO-COST-NEXT: # %bb.1: # %entry
@@ -49,7 +49,7 @@ define fastcc void @Perl_sv_setnv(i8 %c, ptr %.str.54.3682) nounwind {
; DEFAULT-COST-NEXT: addi sp, sp, -32
; DEFAULT-COST-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; DEFAULT-COST-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; DEFAULT-COST-NEXT: andi a0, a0, 255
+; DEFAULT-COST-NEXT: zext.b a0, a0
; DEFAULT-COST-NEXT: li a2, 2
; DEFAULT-COST-NEXT: blt a2, a0, .LBB0_3
; DEFAULT-COST-NEXT: # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index da97ac0d74237..a46168f114bb9 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -32,7 +32,7 @@ declare i64 @llvm.ctpop.i64(i64)
define i8 @test_cttz_i8(i8 %a) nounwind {
; RV32_NOZBB-LABEL: test_cttz_i8:
; RV32_NOZBB: # %bb.0:
-; RV32_NOZBB-NEXT: andi a1, a0, 255
+; RV32_NOZBB-NEXT: zext.b a1, a0
; RV32_NOZBB-NEXT: beqz a1, .LBB0_2
; RV32_NOZBB-NEXT: # %bb.1: # %cond.false
; RV32_NOZBB-NEXT: addi a1, a0, -1
@@ -55,7 +55,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
;
; RV64NOZBB-LABEL: test_cttz_i8:
; RV64NOZBB: # %bb.0:
-; RV64NOZBB-NEXT: andi a1, a0, 255
+; RV64NOZBB-NEXT: zext.b a1, a0
; RV64NOZBB-NEXT: beqz a1, .LBB0_2
; RV64NOZBB-NEXT: # %bb.1: # %cond.false
; RV64NOZBB-NEXT: addi a1, a0, -1
@@ -90,7 +90,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_cttz_i8:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: andi a1, a0, 255
+; RV32XTHEADBB-NEXT: zext.b a1, a0
; RV32XTHEADBB-NEXT: beqz a1, .LBB0_2
; RV32XTHEADBB-NEXT: # %bb.1: # %cond.false
; RV32XTHEADBB-NEXT: addi a1, a0, -1
@@ -106,7 +106,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
;
; RV64XTHEADBB-LABEL: test_cttz_i8:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: andi a1, a0, 255
+; RV64XTHEADBB-NEXT: zext.b a1, a0
; RV64XTHEADBB-NEXT: beqz a1, .LBB0_2
; RV64XTHEADBB-NEXT: # %bb.1: # %cond.false
; RV64XTHEADBB-NEXT: addi a1, a0, -1
@@ -956,7 +956,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
define i8 @test_ctlz_i8(i8 %a) nounwind {
; RV32_NOZBB-LABEL: test_ctlz_i8:
; RV32_NOZBB: # %bb.0:
-; RV32_NOZBB-NEXT: andi a1, a0, 255
+; RV32_NOZBB-NEXT: zext.b a1, a0
; RV32_NOZBB-NEXT: beqz a1, .LBB8_2
; RV32_NOZBB-NEXT: # %bb.1: # %cond.false
; RV32_NOZBB-NEXT: slli a1, a0, 24
@@ -986,7 +986,7 @@ define i8 @test_ctlz_i8(i8 %a) nounwind {
;
; RV64NOZBB-LABEL: test_ctlz_i8:
; RV64NOZBB: # %bb.0:
-; RV64NOZBB-NEXT: andi a1, a0, 255
+; RV64NOZBB-NEXT: zext.b a1, a0
; RV64NOZBB-NEXT: beqz a1, .LBB8_2
; RV64NOZBB-NEXT: # %bb.1: # %cond.false
; RV64NOZBB-NEXT: slli a1, a0, 56
@@ -1016,28 +1016,28 @@ define i8 @test_ctlz_i8(i8 %a) nounwind {
;
; RV32ZBB-LABEL: test_ctlz_i8:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: clz a0, a0
; RV32ZBB-NEXT: addi a0, a0, -24
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: test_ctlz_i8:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
+; RV64ZBB-NEXT: zext.b a0, a0
; RV64ZBB-NEXT: clz a0, a0
; RV64ZBB-NEXT: addi a0, a0, -56
; RV64ZBB-NEXT: ret
;
; RV32XTHEADBB-LABEL: test_ctlz_i8:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: andi a0, a0, 255
+; RV32XTHEADBB-NEXT: zext.b a0, a0
; RV32XTHEADBB-NEXT: th.ff1 a0, a0
; RV32XTHEADBB-NEXT: addi a0, a0, -24
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctlz_i8:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: andi a0, a0, 255
+; RV64XTHEADBB-NEXT: zext.b a0, a0
; RV64XTHEADBB-NEXT: th.ff1 a0, a0
; RV64XTHEADBB-NEXT: addi a0, a0, -56
; RV64XTHEADBB-NEXT: ret
@@ -2256,13 +2256,13 @@ define i8 @test_ctpop_i8(i8 %a) nounwind {
;
; RV32ZBB-LABEL: test_ctpop_i8:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: cpop a0, a0
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: test_ctpop_i8:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
+; RV64ZBB-NEXT: zext.b a0, a0
; RV64ZBB-NEXT: cpopw a0, a0
; RV64ZBB-NEXT: ret
;
@@ -2797,7 +2797,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
define i8 @test_parity_i8(i8 %a) {
; RV32_NOZBB-LABEL: test_parity_i8:
; RV32_NOZBB: # %bb.0:
-; RV32_NOZBB-NEXT: andi a0, a0, 255
+; RV32_NOZBB-NEXT: zext.b a0, a0
; RV32_NOZBB-NEXT: srli a1, a0, 4
; RV32_NOZBB-NEXT: xor a0, a0, a1
; RV32_NOZBB-NEXT: srli a1, a0, 2
@@ -2809,7 +2809,7 @@ define i8 @test_parity_i8(i8 %a) {
;
; RV64NOZBB-LABEL: test_parity_i8:
; RV64NOZBB: # %bb.0:
-; RV64NOZBB-NEXT: andi a0, a0, 255
+; RV64NOZBB-NEXT: zext.b a0, a0
; RV64NOZBB-NEXT: srli a1, a0, 4
; RV64NOZBB-NEXT: xor a0, a0, a1
; RV64NOZBB-NEXT: srli a1, a0, 2
@@ -2821,21 +2821,21 @@ define i8 @test_parity_i8(i8 %a) {
;
; RV32ZBB-LABEL: test_parity_i8:
; RV32ZBB: # %bb.0:
-; RV32ZBB-NEXT: andi a0, a0, 255
+; RV32ZBB-NEXT: zext.b a0, a0
; RV32ZBB-NEXT: cpop a0, a0
; RV32ZBB-NEXT: andi a0, a0, 1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: test_parity_i8:
; RV64ZBB: # %bb.0:
-; RV64ZBB-NEXT: andi a0, a0, 255
+; RV64ZBB-NEXT: zext.b a0, a0
; RV64ZBB-NEXT: cpopw a0, a0
; RV64ZBB-NEXT: andi a0, a0, 1
; RV64ZBB-NEXT: ret
;
; RV32XTHEADBB-LABEL: test_parity_i8:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: andi a0, a0, 255
+; RV32XTHEADBB-NEXT: zext.b a0, a0
; RV32XTHEADBB-NEXT: srli a1, a0, 4
; RV32XTHEADBB-NEXT: xor a0, a0, a1
; RV32XTHEADBB-NEXT: srli a1, a0, 2
@@ -2847,7 +2847,7 @@ define i8 @test_parity_i8(i8 %a) {
;
; RV64XTHEADBB-LABEL: test_parity_i8:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: andi a0, a0, 255
+; RV64XTHEADBB-NEXT: zext.b a0, a0
; RV64XTHEADBB-NEXT: srli a1, a0, 4
; RV64XTHEADBB-NEXT: xor a0, a0, a1
; RV64XTHEADBB-NEXT: srli a1, a0, 2
diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index 844fa0d1e6ad6..e14a894bf1878 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -143,7 +143,7 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
define i8 @udiv8_constant_no_add(i8 %a) nounwind {
; RV32-LABEL: udiv8_constant_no_add:
; RV32: # %bb.0:
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: li a1, 205
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: srli a0, a0, 10
@@ -151,7 +151,7 @@ define i8 @udiv8_constant_no_add(i8 %a) nounwind {
;
; RV64-LABEL: udiv8_constant_no_add:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: li a1, 205
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: srli a0, a0, 10
@@ -163,7 +163,7 @@ define i8 @udiv8_constant_no_add(i8 %a) nounwind {
define i8 @udiv8_constant_add(i8 %a) nounwind {
; RV32IM-LABEL: udiv8_constant_add:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a1, a0, 255
+; RV32IM-NEXT: zext.b a1, a0
; RV32IM-NEXT: li a2, 37
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: srli a1, a1, 8
@@ -176,7 +176,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
;
; RV32IMZB-LABEL: udiv8_constant_add:
; RV32IMZB: # %bb.0:
-; RV32IMZB-NEXT: andi a1, a0, 255
+; RV32IMZB-NEXT: zext.b a1, a0
; RV32IMZB-NEXT: sh3add a2, a1, a1
; RV32IMZB-NEXT: sh2add a1, a2, a1
; RV32IMZB-NEXT: srli a1, a1, 8
@@ -189,7 +189,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
;
; RV64IM-LABEL: udiv8_constant_add:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a1, a0, 255
+; RV64IM-NEXT: zext.b a1, a0
; RV64IM-NEXT: li a2, 37
; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: srli a1, a1, 8
@@ -202,7 +202,7 @@ define i8 @udiv8_constant_add(i8 %a) nounwind {
;
; RV64IMZB-LABEL: udiv8_constant_add:
; RV64IMZB: # %bb.0:
-; RV64IMZB-NEXT: andi a1, a0, 255
+; RV64IMZB-NEXT: zext.b a1, a0
; RV64IMZB-NEXT: sh3add a2, a1, a1
; RV64IMZB-NEXT: sh2add a1, a2, a1
; RV64IMZB-NEXT: srli a1, a1, 8
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index bda6ff43a5e7c..415848449e13f 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -268,8 +268,8 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -277,8 +277,8 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
;
; RV32IM-LABEL: udiv8:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a1, a1, 255
-; RV32IM-NEXT: andi a0, a0, 255
+; RV32IM-NEXT: zext.b a1, a1
+; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: divu a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -286,8 +286,8 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -295,8 +295,8 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
;
; RV64IM-LABEL: udiv8:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a1, a1, 255
-; RV64IM-NEXT: andi a0, a0, 255
+; RV64IM-NEXT: zext.b a1, a1
+; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: divuw a0, a0, a1
; RV64IM-NEXT: ret
%1 = udiv i8 %a, %b
@@ -308,7 +308,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: li a1, 5
; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -317,7 +317,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
;
; RV32IM-LABEL: udiv8_constant:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a0, a0, 255
+; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: li a1, 205
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: srli a0, a0, 10
@@ -327,7 +327,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: li a1, 5
; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -336,7 +336,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
;
; RV64IM-LABEL: udiv8_constant:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
+; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: li a1, 205
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 10
@@ -378,7 +378,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a1, a0, 255
+; RV32I-NEXT: zext.b a1, a0
; RV32I-NEXT: li a0, 10
; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -387,7 +387,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
;
; RV32IM-LABEL: udiv8_constant_lhs:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a0, a0, 255
+; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: li a1, 10
; RV32IM-NEXT: divu a0, a1, a0
; RV32IM-NEXT: ret
@@ -396,7 +396,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a1, a0, 255
+; RV64I-NEXT: zext.b a1, a0
; RV64I-NEXT: li a0, 10
; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -405,7 +405,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
;
; RV64IM-LABEL: udiv8_constant_lhs:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
+; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: li a1, 10
; RV64IM-NEXT: divuw a0, a1, a0
; RV64IM-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index c39085a80ddc1..03ab83ece8ce7 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -2273,7 +2273,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: and a0, a1, a0
; RV32I-NEXT: .LBB32_3: # %start
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2309,7 +2309,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: .LBB32_3: # %start
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index fc866d71a3a70..3fc2b6598b69b 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -2002,7 +2002,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
; RV32I-NEXT: .LBB30_3: # %start
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -2036,7 +2036,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: .LBB30_3: # %start
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fold-mem-offset.ll b/llvm/test/CodeGen/RISCV/fold-mem-offset.ll
index 54eb3c9627691..7d8b8d29aa3c9 100644
--- a/llvm/test/CodeGen/RISCV/fold-mem-offset.ll
+++ b/llvm/test/CodeGen/RISCV/fold-mem-offset.ll
@@ -183,7 +183,7 @@ define zeroext i8 @test_add(ptr %p, iXLen %x, iXLen %y) {
; CHECK-NEXT: lbu a1, 1800(a1)
; CHECK-NEXT: lbu a0, 1810(a0)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
;
; ZBA-LABEL: test_add:
@@ -193,7 +193,7 @@ define zeroext i8 @test_add(ptr %p, iXLen %x, iXLen %y) {
; ZBA-NEXT: lbu a1, 1800(a1)
; ZBA-NEXT: lbu a0, 1810(a0)
; ZBA-NEXT: add a0, a0, a1
-; ZBA-NEXT: andi a0, a0, 255
+; ZBA-NEXT: zext.b a0, a0
; ZBA-NEXT: ret
entry:
%e = getelementptr inbounds nuw i8, ptr %p, i64 1800
@@ -397,7 +397,7 @@ define zeroext i8 @test_add_uw(ptr %p, i32 signext %x, i32 signext %y) {
; RV32I-NEXT: lbu a1, 1800(a1)
; RV32I-NEXT: lbu a0, 1800(a0)
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_add_uw:
@@ -411,7 +411,7 @@ define zeroext i8 @test_add_uw(ptr %p, i32 signext %x, i32 signext %y) {
; RV64I-NEXT: lbu a1, 1800(a1)
; RV64I-NEXT: lbu a0, 1800(a0)
; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ret
;
; RV32ZBA-LABEL: test_add_uw:
@@ -421,7 +421,7 @@ define zeroext i8 @test_add_uw(ptr %p, i32 signext %x, i32 signext %y) {
; RV32ZBA-NEXT: lbu a1, 1800(a1)
; RV32ZBA-NEXT: lbu a0, 1800(a0)
; RV32ZBA-NEXT: add a0, a0, a1
-; RV32ZBA-NEXT: andi a0, a0, 255
+; RV32ZBA-NEXT: zext.b a0, a0
; RV32ZBA-NEXT: ret
;
; RV64ZBA-LABEL: test_add_uw:
@@ -431,7 +431,7 @@ define zeroext i8 @test_add_uw(ptr %p, i32 signext %x, i32 signext %y) {
; RV64ZBA-NEXT: lbu a1, 1800(a1)
; RV64ZBA-NEXT: lbu a0, 1800(a0)
; RV64ZBA-NEXT: add a0, a0, a1
-; RV64ZBA-NEXT: andi a0, a0, 255
+; RV64ZBA-NEXT: zext.b a0, a0
; RV64ZBA-NEXT: ret
entry:
%e = getelementptr inbounds nuw i8, ptr %p, i64 1800
@@ -675,7 +675,7 @@ define zeroext i8 @test_optsize(ptr %p, iXLen %x, iXLen %y) optsize {
; CHECK-NEXT: lbu a1, 0(a1)
; CHECK-NEXT: lbu a0, 10(a0)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
;
; ZBA-LABEL: test_optsize:
@@ -686,7 +686,7 @@ define zeroext i8 @test_optsize(ptr %p, iXLen %x, iXLen %y) optsize {
; ZBA-NEXT: lbu a1, 0(a1)
; ZBA-NEXT: lbu a0, 10(a0)
; ZBA-NEXT: add a0, a0, a1
-; ZBA-NEXT: andi a0, a0, 255
+; ZBA-NEXT: zext.b a0, a0
; ZBA-NEXT: ret
entry:
%e = getelementptr inbounds nuw i8, ptr %p, i64 1800
@@ -708,7 +708,7 @@ define zeroext i8 @test_minsize(ptr %p, iXLen %x, iXLen %y) minsize {
; CHECK-NEXT: lbu a1, 0(a1)
; CHECK-NEXT: lbu a0, 10(a0)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
;
; ZBA-LABEL: test_minsize:
@@ -719,7 +719,7 @@ define zeroext i8 @test_minsize(ptr %p, iXLen %x, iXLen %y) minsize {
; ZBA-NEXT: lbu a1, 0(a1)
; ZBA-NEXT: lbu a0, 10(a0)
; ZBA-NEXT: add a0, a0, a1
-; ZBA-NEXT: andi a0, a0, 255
+; ZBA-NEXT: zext.b a0, a0
; ZBA-NEXT: ret
entry:
%e = getelementptr inbounds nuw i8, ptr %p, i64 1800
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index cf57ecd6cd1e4..f59f86ded76b4 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -7874,7 +7874,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
; RV32I-NEXT: .LBB38_3: # %start
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -7911,7 +7911,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: .LBB38_3: # %start
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
index 94b717b42e92b..20dd590d2ea98 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
@@ -19,7 +19,7 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 4
; RV32-NEXT: sub a1, a1, a0
-; RV32-NEXT: andi a0, a1, 255
+; RV32-NEXT: zext.b a0, a1
; RV32-NEXT: ret
;
; RV64-LABEL: ctz_v4i32:
@@ -36,7 +36,7 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: li a1, 4
; RV64-NEXT: subw a1, a1, a0
-; RV64-NEXT: andi a0, a1, 255
+; RV64-NEXT: zext.b a0, a1
; RV64-NEXT: ret
%res = call i16 @llvm.experimental.cttz.elts.i16.v4i32(<4 x i32> %a, i1 0)
ret i16 %res
diff --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index a18f5d6902dca..1be599e4f8e1e 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -739,10 +739,10 @@ define i64 @test_reassoc_mul_i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
define i8 @test_reassoc_minu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_minu_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a1, a1, 255
-; CHECK-NEXT: andi a0, a0, 255
-; CHECK-NEXT: andi a2, a2, 255
+; CHECK-NEXT: zext.b a3, a3
+; CHECK-NEXT: zext.b a1, a1
+; CHECK-NEXT: zext.b a0, a0
+; CHECK-NEXT: zext.b a2, a2
; CHECK-NEXT: minu a0, a0, a1
; CHECK-NEXT: minu a1, a2, a3
; CHECK-NEXT: minu a0, a0, a1
@@ -867,10 +867,10 @@ define i64 @test_reassoc_min_i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
define i8 @test_reassoc_maxu_i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3) {
; CHECK-LABEL: test_reassoc_maxu_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a3, a3, 255
-; CHECK-NEXT: andi a1, a1, 255
-; CHECK-NEXT: andi a0, a0, 255
-; CHECK-NEXT: andi a2, a2, 255
+; CHECK-NEXT: zext.b a3, a3
+; CHECK-NEXT: zext.b a1, a1
+; CHECK-NEXT: zext.b a0, a0
+; CHECK-NEXT: zext.b a2, a2
; CHECK-NEXT: maxu a0, a0, a1
; CHECK-NEXT: maxu a1, a2, a3
; CHECK-NEXT: maxu a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll b/llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll
index 6d3000a513538..f506d30e7b6f8 100644
--- a/llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll
+++ b/llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll
@@ -203,7 +203,7 @@ define signext i32 @switch_dispatch(i8 %a) {
; CHECK-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
; CHECK-NEXT: .cfi_offset s0, -16
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: li a1, 31
; CHECK-NEXT: blt a1, a0, .LBB2_5
; CHECK-NEXT: # %bb.1: # %bb
@@ -293,7 +293,7 @@ define signext i32 @branch_dispatch(i8 %a) {
; CHECK-NEXT: .cfi_offset ra, -8
; CHECK-NEXT: .cfi_offset s0, -16
; CHECK-NEXT: .cfi_remember_state
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: beq a0, a1, .LBB3_7
; CHECK-NEXT: # %bb.1: # %case.1
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 6ee6e1261e7e9..aa397504d909c 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -43,7 +43,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_2:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a2, a1, 255
+; RV32-FAST-NEXT: zext.b a2, a1
; RV32-FAST-NEXT: slli a1, a1, 8
; RV32-FAST-NEXT: or a1, a1, a2
; RV32-FAST-NEXT: sh a1, 0(a0)
@@ -51,7 +51,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_2:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a2, a1, 255
+; RV64-FAST-NEXT: zext.b a2, a1
; RV64-FAST-NEXT: slli a1, a1, 8
; RV64-FAST-NEXT: or a1, a1, a2
; RV64-FAST-NEXT: sh a1, 0(a0)
@@ -79,7 +79,7 @@ define void @memset_4(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_4:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -126,7 +126,7 @@ define void @memset_8(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_8:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -136,7 +136,7 @@ define void @memset_8(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_8:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a1, a1, 255
+; RV64-FAST-NEXT: zext.b a1, a1
; RV64-FAST-NEXT: lui a2, 4112
; RV64-FAST-NEXT: addiw a2, a2, 257
; RV64-FAST-NEXT: slli a3, a2, 32
@@ -191,7 +191,7 @@ define void @memset_16(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_16:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -203,7 +203,7 @@ define void @memset_16(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_16:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a1, a1, 255
+; RV64-FAST-NEXT: zext.b a1, a1
; RV64-FAST-NEXT: lui a2, 4112
; RV64-FAST-NEXT: addiw a2, a2, 257
; RV64-FAST-NEXT: slli a3, a2, 32
@@ -291,7 +291,7 @@ define void @memset_32(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_32:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -307,7 +307,7 @@ define void @memset_32(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_32:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a1, a1, 255
+; RV64-FAST-NEXT: zext.b a1, a1
; RV64-FAST-NEXT: lui a2, 4112
; RV64-FAST-NEXT: addiw a2, a2, 257
; RV64-FAST-NEXT: slli a3, a2, 32
@@ -461,7 +461,7 @@ define void @memset_64(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_64:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -485,7 +485,7 @@ define void @memset_64(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_64:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a1, a1, 255
+; RV64-FAST-NEXT: zext.b a1, a1
; RV64-FAST-NEXT: lui a2, 4112
; RV64-FAST-NEXT: addiw a2, a2, 257
; RV64-FAST-NEXT: slli a3, a2, 32
@@ -509,7 +509,7 @@ define void @memset_64(ptr %a, i8 %value) nounwind {
define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_2:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a2, a1, 255
+; RV32-BOTH-NEXT: zext.b a2, a1
; RV32-BOTH-NEXT: slli a1, a1, 8
; RV32-BOTH-NEXT: or a1, a1, a2
; RV32-BOTH-NEXT: sh a1, 0(a0)
@@ -517,7 +517,7 @@ define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_2:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a2, a1, 255
+; RV64-BOTH-NEXT: zext.b a2, a1
; RV64-BOTH-NEXT: slli a1, a1, 8
; RV64-BOTH-NEXT: or a1, a1, a2
; RV64-BOTH-NEXT: sh a1, 0(a0)
@@ -529,7 +529,7 @@ define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
define void @aligned_memset_4(ptr align 4 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_4:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -552,7 +552,7 @@ define void @aligned_memset_4(ptr align 4 %a, i8 %value) nounwind {
define void @aligned_memset_8(ptr align 8 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_8:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -562,7 +562,7 @@ define void @aligned_memset_8(ptr align 8 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_8:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a1, a1, 255
+; RV64-BOTH-NEXT: zext.b a1, a1
; RV64-BOTH-NEXT: lui a2, 4112
; RV64-BOTH-NEXT: addiw a2, a2, 257
; RV64-BOTH-NEXT: slli a3, a2, 32
@@ -577,7 +577,7 @@ define void @aligned_memset_8(ptr align 8 %a, i8 %value) nounwind {
define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_16:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -589,7 +589,7 @@ define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_16:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a1, a1, 255
+; RV64-BOTH-NEXT: zext.b a1, a1
; RV64-BOTH-NEXT: lui a2, 4112
; RV64-BOTH-NEXT: addiw a2, a2, 257
; RV64-BOTH-NEXT: slli a3, a2, 32
@@ -605,7 +605,7 @@ define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_32:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -621,7 +621,7 @@ define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_32:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a1, a1, 255
+; RV64-BOTH-NEXT: zext.b a1, a1
; RV64-BOTH-NEXT: lui a2, 4112
; RV64-BOTH-NEXT: addiw a2, a2, 257
; RV64-BOTH-NEXT: slli a3, a2, 32
@@ -639,7 +639,7 @@ define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_64:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -663,7 +663,7 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_64:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a1, a1, 255
+; RV64-BOTH-NEXT: zext.b a1, a1
; RV64-BOTH-NEXT: lui a2, 4112
; RV64-BOTH-NEXT: addiw a2, a2, 257
; RV64-BOTH-NEXT: slli a3, a2, 32
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index 5a01d43fea56b..a5426e560bd65 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -551,7 +551,7 @@ define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, ptr %p) {
; RV32-LABEL: uaddo_i8_increment_noncanonical_1:
; RV32: # %bb.0:
; RV32-NEXT: addi a2, a0, 1
-; RV32-NEXT: andi a0, a2, 255
+; RV32-NEXT: zext.b a0, a2
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: sb a2, 0(a1)
; RV32-NEXT: ret
@@ -559,7 +559,7 @@ define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, ptr %p) {
; RV64-LABEL: uaddo_i8_increment_noncanonical_1:
; RV64: # %bb.0:
; RV64-NEXT: addi a2, a0, 1
-; RV64-NEXT: andi a0, a2, 255
+; RV64-NEXT: zext.b a0, a2
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: sb a2, 0(a1)
; RV64-NEXT: ret
@@ -851,7 +851,7 @@ define i1 @usubo_ugt_i32(i32 %x, i32 %y, ptr %p) {
define i1 @usubo_ugt_constant_op0_i8(i8 %x, ptr %p) {
; RV32-LABEL: usubo_ugt_constant_op0_i8:
; RV32: # %bb.0:
-; RV32-NEXT: andi a2, a0, 255
+; RV32-NEXT: zext.b a2, a0
; RV32-NEXT: li a3, 42
; RV32-NEXT: sub a3, a3, a0
; RV32-NEXT: sltiu a0, a2, 43
@@ -861,7 +861,7 @@ define i1 @usubo_ugt_constant_op0_i8(i8 %x, ptr %p) {
;
; RV64-LABEL: usubo_ugt_constant_op0_i8:
; RV64: # %bb.0:
-; RV64-NEXT: andi a2, a0, 255
+; RV64-NEXT: zext.b a2, a0
; RV64-NEXT: li a3, 42
; RV64-NEXT: subw a3, a3, a0
; RV64-NEXT: sltiu a0, a2, 43
@@ -933,7 +933,7 @@ define i1 @usubo_ult_constant_op1_i16(i16 %x, ptr %p) {
define i1 @usubo_ugt_constant_op1_i8(i8 %x, ptr %p) {
; RV32-LABEL: usubo_ugt_constant_op1_i8:
; RV32: # %bb.0:
-; RV32-NEXT: andi a2, a0, 255
+; RV32-NEXT: zext.b a2, a0
; RV32-NEXT: sltiu a2, a2, 45
; RV32-NEXT: addi a0, a0, -45
; RV32-NEXT: sb a0, 0(a1)
@@ -942,7 +942,7 @@ define i1 @usubo_ugt_constant_op1_i8(i8 %x, ptr %p) {
;
; RV64-LABEL: usubo_ugt_constant_op1_i8:
; RV64: # %bb.0:
-; RV64-NEXT: andi a2, a0, 255
+; RV64-NEXT: zext.b a2, a0
; RV64-NEXT: sltiu a2, a2, 45
; RV64-NEXT: addi a0, a0, -45
; RV64-NEXT: sb a0, 0(a1)
diff --git a/llvm/test/CodeGen/RISCV/pr65025.ll b/llvm/test/CodeGen/RISCV/pr65025.ll
index c6770b05da555..4eb6a478bbbcd 100644
--- a/llvm/test/CodeGen/RISCV/pr65025.ll
+++ b/llvm/test/CodeGen/RISCV/pr65025.ll
@@ -7,8 +7,8 @@ define ptr @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %va
; CHECK-NEXT: andi a3, a0, -4
; CHECK-NEXT: slli a4, a0, 3
; CHECK-NEXT: li a5, 255
-; CHECK-NEXT: andi a1, a1, 255
-; CHECK-NEXT: andi a2, a2, 255
+; CHECK-NEXT: zext.b a1, a1
+; CHECK-NEXT: zext.b a2, a2
; CHECK-NEXT: sllw a5, a5, a4
; CHECK-NEXT: sllw a1, a1, a4
; CHECK-NEXT: sllw a2, a2, a4
diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index 2001262008237..612aaafc53bc8 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -370,8 +370,8 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -379,8 +379,8 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
;
; RV32IM-LABEL: urem8:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a1, a1, 255
-; RV32IM-NEXT: andi a0, a0, 255
+; RV32IM-NEXT: zext.b a1, a1
+; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: remu a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -388,8 +388,8 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -397,8 +397,8 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
;
; RV64IM-LABEL: urem8:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a1, a1, 255
-; RV64IM-NEXT: andi a0, a0, 255
+; RV64IM-NEXT: zext.b a1, a1
+; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: remuw a0, a0, a1
; RV64IM-NEXT: ret
%1 = urem i8 %a, %b
@@ -410,7 +410,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: andi a1, a0, 255
+; RV32I-NEXT: zext.b a1, a0
; RV32I-NEXT: li a0, 10
; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -419,7 +419,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
;
; RV32IM-LABEL: urem8_constant_lhs:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: andi a0, a0, 255
+; RV32IM-NEXT: zext.b a0, a0
; RV32IM-NEXT: li a1, 10
; RV32IM-NEXT: remu a0, a1, a0
; RV32IM-NEXT: ret
@@ -428,7 +428,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: andi a1, a0, 255
+; RV64I-NEXT: zext.b a1, a0
; RV64I-NEXT: li a0, 10
; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -437,7 +437,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
;
; RV64IM-LABEL: urem8_constant_lhs:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: andi a0, a0, 255
+; RV64IM-NEXT: zext.b a0, a0
; RV64IM-NEXT: li a1, 10
; RV64IM-NEXT: remuw a0, a1, a0
; RV64IM-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
index b194c0ed52dfb..4aa6dd4dba6c2 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -108,7 +108,7 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
define i32 @packh_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: packh_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: or a0, a1, a0
@@ -128,8 +128,8 @@ define i32 @packh_i32(i32 %a, i32 %b) nounwind {
define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: packh_i32_2:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: ret
@@ -148,7 +148,7 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: packh_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: slli a2, a2, 24
; RV32I-NEXT: srli a2, a2, 16
; RV32I-NEXT: or a0, a2, a0
@@ -170,8 +170,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: packh_i64_2:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: andi a1, a2, 255
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a2
; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: li a1, 0
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 985837d05caa2..734bffee96f09 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -129,7 +129,7 @@ define i64 @pack_i64_3(ptr %0, ptr %1) {
define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: packh_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: or a0, a1, a0
@@ -149,8 +149,8 @@ define signext i32 @packh_i32(i32 signext %a, i32 signext %b) nounwind {
define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: packh_i32_2:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
@@ -169,7 +169,7 @@ define i32 @packh_i32_2(i32 %a, i32 %b) nounwind {
define i64 @packh_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: packh_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: or a0, a1, a0
@@ -189,8 +189,8 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
define i64 @packh_i64_2(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: packh_i64_2:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index e53876d69b59b..2587411566a3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -324,7 +324,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 384
; RV32-NEXT: andi sp, sp, -128
-; RV32-NEXT: andi a1, a1, 255
+; RV32-NEXT: zext.b a1, a1
; RV32-NEXT: mv a2, sp
; RV32-NEXT: li a3, 128
; RV32-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -355,7 +355,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 384
; RV64-NEXT: andi sp, sp, -128
-; RV64-NEXT: andi a1, a1, 255
+; RV64-NEXT: zext.b a1, a1
; RV64-NEXT: mv a2, sp
; RV64-NEXT: li a3, 128
; RV64-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -386,7 +386,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
; RV32ZBS-NEXT: addi s0, sp, 384
; RV32ZBS-NEXT: andi sp, sp, -128
-; RV32ZBS-NEXT: andi a1, a1, 255
+; RV32ZBS-NEXT: zext.b a1, a1
; RV32ZBS-NEXT: mv a2, sp
; RV32ZBS-NEXT: li a3, 128
; RV32ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, ma
@@ -417,7 +417,7 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; RV64ZBS-NEXT: addi s0, sp, 384
; RV64ZBS-NEXT: andi sp, sp, -128
-; RV64ZBS-NEXT: andi a1, a1, 255
+; RV64ZBS-NEXT: zext.b a1, a1
; RV64ZBS-NEXT: mv a2, sp
; RV64ZBS-NEXT: li a3, 128
; RV64ZBS-NEXT: vsetvli zero, a3, e8, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index a91263e85e9e8..b9df34d04c043 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -3023,13 +3023,13 @@ define <8 x i8> @buildvec_v8i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RV32VB-LABEL: buildvec_v8i8_pack:
; RV32VB: # %bb.0:
; RV32VB-NEXT: slli a7, a7, 24
-; RV32VB-NEXT: andi a6, a6, 255
-; RV32VB-NEXT: andi a4, a4, 255
-; RV32VB-NEXT: andi a5, a5, 255
+; RV32VB-NEXT: zext.b a6, a6
+; RV32VB-NEXT: zext.b a4, a4
+; RV32VB-NEXT: zext.b a5, a5
; RV32VB-NEXT: slli a3, a3, 24
-; RV32VB-NEXT: andi a2, a2, 255
-; RV32VB-NEXT: andi a0, a0, 255
-; RV32VB-NEXT: andi a1, a1, 255
+; RV32VB-NEXT: zext.b a2, a2
+; RV32VB-NEXT: zext.b a0, a0
+; RV32VB-NEXT: zext.b a1, a1
; RV32VB-NEXT: slli a6, a6, 16
; RV32VB-NEXT: slli a5, a5, 8
; RV32VB-NEXT: slli a2, a2, 16
@@ -3075,14 +3075,14 @@ define <8 x i8> @buildvec_v8i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
;
; RVA22U64-LABEL: buildvec_v8i8_pack:
; RVA22U64: # %bb.0:
-; RVA22U64-NEXT: andi t0, a4, 255
-; RVA22U64-NEXT: andi a5, a5, 255
+; RVA22U64-NEXT: zext.b t0, a4
+; RVA22U64-NEXT: zext.b a5, a5
; RVA22U64-NEXT: slli a7, a7, 56
-; RVA22U64-NEXT: andi a4, a6, 255
-; RVA22U64-NEXT: andi a2, a2, 255
-; RVA22U64-NEXT: andi a3, a3, 255
-; RVA22U64-NEXT: andi a0, a0, 255
-; RVA22U64-NEXT: andi a1, a1, 255
+; RVA22U64-NEXT: zext.b a4, a6
+; RVA22U64-NEXT: zext.b a2, a2
+; RVA22U64-NEXT: zext.b a3, a3
+; RVA22U64-NEXT: zext.b a0, a0
+; RVA22U64-NEXT: zext.b a1, a1
; RVA22U64-NEXT: slli t0, t0, 32
; RVA22U64-NEXT: slli a5, a5, 40
; RVA22U64-NEXT: slli a4, a4, 48
@@ -3154,11 +3154,11 @@ define <6 x i8> @buildvec_v6i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
; RV32VB-LABEL: buildvec_v6i8_pack:
; RV32VB: # %bb.0:
; RV32VB-NEXT: slli a3, a3, 24
-; RV32VB-NEXT: andi a2, a2, 255
-; RV32VB-NEXT: andi a0, a0, 255
-; RV32VB-NEXT: andi a1, a1, 255
-; RV32VB-NEXT: andi a4, a4, 255
-; RV32VB-NEXT: andi a5, a5, 255
+; RV32VB-NEXT: zext.b a2, a2
+; RV32VB-NEXT: zext.b a0, a0
+; RV32VB-NEXT: zext.b a1, a1
+; RV32VB-NEXT: zext.b a4, a4
+; RV32VB-NEXT: zext.b a5, a5
; RV32VB-NEXT: slli a2, a2, 16
; RV32VB-NEXT: slli a1, a1, 8
; RV32VB-NEXT: slli a5, a5, 8
@@ -3198,12 +3198,12 @@ define <6 x i8> @buildvec_v6i8_pack(i8 %e1, i8 %e2, i8 %e3, i8 %e4, i8 %e5, i8 %
;
; RVA22U64-LABEL: buildvec_v6i8_pack:
; RVA22U64: # %bb.0:
-; RVA22U64-NEXT: andi a2, a2, 255
-; RVA22U64-NEXT: andi a3, a3, 255
-; RVA22U64-NEXT: andi a0, a0, 255
-; RVA22U64-NEXT: andi a1, a1, 255
-; RVA22U64-NEXT: andi a4, a4, 255
-; RVA22U64-NEXT: andi a5, a5, 255
+; RVA22U64-NEXT: zext.b a2, a2
+; RVA22U64-NEXT: zext.b a3, a3
+; RVA22U64-NEXT: zext.b a0, a0
+; RVA22U64-NEXT: zext.b a1, a1
+; RVA22U64-NEXT: zext.b a4, a4
+; RVA22U64-NEXT: zext.b a5, a5
; RVA22U64-NEXT: slli a2, a2, 16
; RVA22U64-NEXT: slli a3, a3, 24
; RVA22U64-NEXT: slli a1, a1, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 76590d47a3230..a9f6392800012 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -410,8 +410,8 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV32ZVE32F-NEXT: vmv.x.s a1, v9
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: andi a1, a1, 255
-; RV32ZVE32F-NEXT: andi a2, a2, 255
+; RV32ZVE32F-NEXT: zext.b a1, a1
+; RV32ZVE32F-NEXT: zext.b a2, a2
; RV32ZVE32F-NEXT: sw a1, 0(a0)
; RV32ZVE32F-NEXT: sw zero, 4(a0)
; RV32ZVE32F-NEXT: sw a2, 8(a0)
@@ -440,9 +440,9 @@ define <2 x i64> @mgather_v2i8_zextload_v2i64(<2 x ptr> %ptrs, <2 x i1> %m, <2 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: ret
%v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0(<2 x ptr> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru)
%ev = zext <2 x i8> %v to <2 x i64>
@@ -1708,7 +1708,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a2, .LBB25_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1721,7 +1721,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1749,7 +1749,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1764,7 +1764,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a2, .LBB25_11
; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1779,7 +1779,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: lh a0, 0(a0)
@@ -1793,7 +1793,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1807,7 +1807,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -1820,7 +1820,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -2756,7 +2756,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a2, .LBB37_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2769,7 +2769,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2797,7 +2797,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2812,7 +2812,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a2, .LBB37_11
; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2827,7 +2827,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: lw a0, 0(a0)
@@ -2841,7 +2841,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2855,7 +2855,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -2868,7 +2868,7 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
@@ -4806,7 +4806,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a3, .LBB50_3
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: ld a3, 0(a3)
@@ -4823,7 +4823,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a4, v9
-; RV64ZVE32F-NEXT: andi a4, a4, 255
+; RV64ZVE32F-NEXT: zext.b a4, a4
; RV64ZVE32F-NEXT: slli a4, a4, 3
; RV64ZVE32F-NEXT: add a4, a1, a4
; RV64ZVE32F-NEXT: ld a4, 0(a4)
@@ -4836,7 +4836,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz a6, .LBB50_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a6, v8
-; RV64ZVE32F-NEXT: andi a6, a6, 255
+; RV64ZVE32F-NEXT: zext.b a6, a6
; RV64ZVE32F-NEXT: slli a6, a6, 3
; RV64ZVE32F-NEXT: add a6, a1, a6
; RV64ZVE32F-NEXT: ld a6, 0(a6)
@@ -4860,7 +4860,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB50_11: # %cond.load7
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a7, v8
-; RV64ZVE32F-NEXT: andi a7, a7, 255
+; RV64ZVE32F-NEXT: zext.b a7, a7
; RV64ZVE32F-NEXT: slli a7, a7, 3
; RV64ZVE32F-NEXT: add a7, a1, a7
; RV64ZVE32F-NEXT: ld a7, 0(a7)
@@ -4868,7 +4868,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz t0, .LBB50_8
; RV64ZVE32F-NEXT: .LBB50_12: # %cond.load10
; RV64ZVE32F-NEXT: vmv.x.s t0, v9
-; RV64ZVE32F-NEXT: andi t0, t0, 255
+; RV64ZVE32F-NEXT: zext.b t0, t0
; RV64ZVE32F-NEXT: slli t0, t0, 3
; RV64ZVE32F-NEXT: add t0, a1, t0
; RV64ZVE32F-NEXT: ld t0, 0(t0)
@@ -4877,7 +4877,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB50_13: # %cond.load13
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s t1, v8
-; RV64ZVE32F-NEXT: andi t1, t1, 255
+; RV64ZVE32F-NEXT: zext.b t1, t1
; RV64ZVE32F-NEXT: slli t1, t1, 3
; RV64ZVE32F-NEXT: add t1, a1, t1
; RV64ZVE32F-NEXT: ld t1, 0(t1)
@@ -4887,7 +4887,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: beqz t2, .LBB50_17
; RV64ZVE32F-NEXT: # %bb.15: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s t2, v8
-; RV64ZVE32F-NEXT: andi t2, t2, 255
+; RV64ZVE32F-NEXT: zext.b t2, t2
; RV64ZVE32F-NEXT: slli t2, t2, 3
; RV64ZVE32F-NEXT: add t2, a1, t2
; RV64ZVE32F-NEXT: ld t2, 0(t2)
@@ -4903,7 +4903,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB50_18: # %cond.load19
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a1, a1, a2
; RV64ZVE32F-NEXT: ld a1, 0(a1)
@@ -7546,7 +7546,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: beqz a2, .LBB66_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7559,7 +7559,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7587,7 +7587,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7602,7 +7602,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: beqz a2, .LBB66_11
; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7617,7 +7617,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: lh a0, 0(a0)
@@ -7631,7 +7631,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7645,7 +7645,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -7658,7 +7658,7 @@ define <8 x bfloat> @mgather_baseidx_zext_v8i8_v8bf16(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lh a2, 0(a2)
@@ -8899,7 +8899,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_2
; RV64ZVE32F-ZVFH-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -8912,7 +8912,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -8940,7 +8940,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v10, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -8955,7 +8955,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB76_11
; RV64ZVE32F-ZVFH-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -8970,7 +8970,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a1, a1
; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a0)
@@ -8984,7 +8984,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB76_14: # %cond.load4
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -8998,7 +8998,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -9011,7 +9011,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFH-NEXT: .LBB76_16: # %cond.load10
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: flh fa5, 0(a2)
@@ -9031,7 +9031,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_2
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9044,7 +9044,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9072,7 +9072,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v10, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9087,7 +9087,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: beqz a2, .LBB76_11
; RV64ZVE32F-ZVFHMIN-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9102,7 +9102,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a1, a1
; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
; RV64ZVE32F-ZVFHMIN-NEXT: lh a0, 0(a0)
@@ -9116,7 +9116,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: ret
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_14: # %cond.load4
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9130,7 +9130,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -9143,7 +9143,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-ZVFHMIN-NEXT: .LBB76_16: # %cond.load10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: lh a2, 0(a2)
@@ -10067,7 +10067,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: beqz a2, .LBB86_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10080,7 +10080,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10108,7 +10108,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10123,7 +10123,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: beqz a2, .LBB86_11
; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10138,7 +10138,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: flw fa5, 0(a0)
@@ -10152,7 +10152,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB86_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10166,7 +10166,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -10179,7 +10179,7 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: .LBB86_16: # %cond.load10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
@@ -11836,7 +11836,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: beqz a3, .LBB99_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa0, 0(a3)
@@ -11847,7 +11847,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa1, 0(a3)
@@ -11870,7 +11870,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB99_8: # %cond.load13
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa5, 0(a3)
@@ -11880,7 +11880,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: beqz a3, .LBB99_11
; RV64ZVE32F-NEXT: # %bb.10: # %cond.load16
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa6, 0(a3)
@@ -11890,7 +11890,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a1, a1, a2
; RV64ZVE32F-NEXT: fld fa7, 0(a1)
@@ -11906,7 +11906,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB99_14: # %cond.load4
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa2, 0(a3)
@@ -11915,7 +11915,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB99_15: # %cond.load7
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa3, 0(a3)
@@ -11923,7 +11923,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: beqz a3, .LBB99_7
; RV64ZVE32F-NEXT: .LBB99_16: # %cond.load10
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-NEXT: andi a3, a3, 255
+; RV64ZVE32F-NEXT: zext.b a3, a3
; RV64ZVE32F-NEXT: slli a3, a3, 3
; RV64ZVE32F-NEXT: add a3, a1, a3
; RV64ZVE32F-NEXT: fld fa4, 0(a3)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 8f2672e8f40c1..4cd15f8a03d6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -1281,7 +1281,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: beqz a2, .LBB20_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1293,7 +1293,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1319,7 +1319,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1337,7 +1337,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB20_12: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1349,7 +1349,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1360,7 +1360,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB20_14: # %cond.store7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1371,7 +1371,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: j .LBB20_9
; RV64ZVE32F-NEXT: .LBB20_15: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -1383,7 +1383,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -2172,7 +2172,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: beqz a2, .LBB31_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -2184,7 +2184,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -2210,7 +2210,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -2229,7 +2229,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB31_12: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -2242,7 +2242,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -2253,7 +2253,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB31_14: # %cond.store7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -2265,7 +2265,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: j .LBB31_9
; RV64ZVE32F-NEXT: .LBB31_15: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -2278,7 +2278,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4046,7 +4046,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: ld a0, 0(a0)
; RV64ZVE32F-NEXT: vmv.x.s t2, v8
-; RV64ZVE32F-NEXT: andi t2, t2, 255
+; RV64ZVE32F-NEXT: zext.b t2, t2
; RV64ZVE32F-NEXT: slli t2, t2, 3
; RV64ZVE32F-NEXT: add t2, a1, t2
; RV64ZVE32F-NEXT: sd a0, 0(t2)
@@ -4057,7 +4057,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a0, v9
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd t1, 0(a0)
@@ -4080,7 +4080,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB44_8: # %cond.store9
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd a5, 0(a0)
@@ -4095,7 +4095,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB44_12: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd t0, 0(a0)
@@ -4104,7 +4104,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB44_13: # %cond.store5
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd a7, 0(a0)
@@ -4112,7 +4112,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: beqz a0, .LBB44_7
; RV64ZVE32F-NEXT: .LBB44_14: # %cond.store7
; RV64ZVE32F-NEXT: vmv.x.s a0, v9
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd a6, 0(a0)
@@ -4121,7 +4121,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: j .LBB44_9
; RV64ZVE32F-NEXT: .LBB44_15: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd a3, 0(a0)
@@ -4130,7 +4130,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB44_16: # %cond.store13
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a0, v8
-; RV64ZVE32F-NEXT: andi a0, a0, 255
+; RV64ZVE32F-NEXT: zext.b a0, a0
; RV64ZVE32F-NEXT: slli a0, a0, 3
; RV64ZVE32F-NEXT: add a0, a1, a0
; RV64ZVE32F-NEXT: sd a2, 0(a0)
@@ -6630,7 +6630,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fmv.h.x fa5, a3
@@ -6645,7 +6645,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a3, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fmv.h.x fa5, a3
@@ -6673,7 +6673,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fmv.h.x fa5, a3
@@ -6692,7 +6692,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: vmv.x.s a3, v11
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -6707,7 +6707,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fmv.h.x fa5, a3
@@ -6719,7 +6719,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: vmv.x.s a3, v9
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -6732,7 +6732,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: vmv.x.s a3, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
@@ -6747,7 +6747,7 @@ define void @mscatter_baseidx_zext_v8i8_v8bf16(<8 x bfloat> %val, ptr %base, <8
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a1, v9
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 1
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -7955,7 +7955,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: beqz a2, .LBB70_2
; RV64ZVE32F-ZVFH-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -7967,7 +7967,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v10, v9, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -7993,7 +7993,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v9, v10, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8011,7 +8011,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: ret
; RV64ZVE32F-ZVFH-NEXT: .LBB70_12: # %cond.store3
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8023,7 +8023,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8034,7 +8034,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: .LBB70_14: # %cond.store7
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8045,7 +8045,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: j .LBB70_9
; RV64ZVE32F-ZVFH-NEXT: .LBB70_15: # %cond.store11
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFH-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFH-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8057,7 +8057,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-ZVFH-NEXT: vslidedown.vi v9, v9, 1
; RV64ZVE32F-ZVFH-NEXT: vmv.x.s a1, v9
-; RV64ZVE32F-ZVFH-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFH-NEXT: zext.b a1, a1
; RV64ZVE32F-ZVFH-NEXT: slli a1, a1, 1
; RV64ZVE32F-ZVFH-NEXT: add a0, a0, a1
; RV64ZVE32F-ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -8075,7 +8075,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v8
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: fmv.h.x fa5, a3
@@ -8090,7 +8090,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v10, v8, 1
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v10
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: fmv.h.x fa5, a3
@@ -8118,7 +8118,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: fmv.h.x fa5, a3
@@ -8137,7 +8137,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v11, v8, 2
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v11
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -8152,7 +8152,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v9, v8, 3
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
; RV64ZVE32F-ZVFHMIN-NEXT: fmv.h.x fa5, a3
@@ -8164,7 +8164,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v9
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -8177,7 +8177,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v10, v8, 6
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a2, a2, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a2, a2
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a3, v10
; RV64ZVE32F-ZVFHMIN-NEXT: slli a2, a2, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a2, a0, a2
@@ -8192,7 +8192,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-ZVFHMIN-NEXT: vslidedown.vi v8, v8, 7
; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-ZVFHMIN-NEXT: vmv.x.s a1, v9
-; RV64ZVE32F-ZVFHMIN-NEXT: andi a1, a1, 255
+; RV64ZVE32F-ZVFHMIN-NEXT: zext.b a1, a1
; RV64ZVE32F-ZVFHMIN-NEXT: slli a1, a1, 1
; RV64ZVE32F-ZVFHMIN-NEXT: add a0, a0, a1
; RV64ZVE32F-ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -9052,7 +9052,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: beqz a2, .LBB80_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -9064,7 +9064,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -9090,7 +9090,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9109,7 +9109,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB80_12: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -9122,7 +9122,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -9133,7 +9133,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB80_14: # %cond.store7
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9145,7 +9145,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: j .LBB80_9
; RV64ZVE32F-NEXT: .LBB80_15: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9158,7 +9158,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v10
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 2
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10669,7 +10669,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: beqz a2, .LBB93_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa0, 0(a2)
@@ -10680,7 +10680,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa1, 0(a2)
@@ -10703,7 +10703,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB93_8: # %cond.store9
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa5, 0(a2)
@@ -10718,7 +10718,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: ret
; RV64ZVE32F-NEXT: .LBB93_12: # %cond.store3
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa2, 0(a2)
@@ -10727,7 +10727,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB93_13: # %cond.store5
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa3, 0(a2)
@@ -10735,7 +10735,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: beqz a2, .LBB93_7
; RV64ZVE32F-NEXT: .LBB93_14: # %cond.store7
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa4, 0(a2)
@@ -10744,7 +10744,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: j .LBB93_9
; RV64ZVE32F-NEXT: .LBB93_15: # %cond.store11
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
-; RV64ZVE32F-NEXT: andi a2, a2, 255
+; RV64ZVE32F-NEXT: zext.b a2, a2
; RV64ZVE32F-NEXT: slli a2, a2, 3
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: fsd fa6, 0(a2)
@@ -10753,7 +10753,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB93_16: # %cond.store13
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: andi a1, a1, 255
+; RV64ZVE32F-NEXT: zext.b a1, a1
; RV64ZVE32F-NEXT: slli a1, a1, 3
; RV64ZVE32F-NEXT: add a0, a0, a1
; RV64ZVE32F-NEXT: fsd fa7, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 52dd87068b0c8..722a1186facab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -1828,7 +1828,7 @@ define zeroext i8 @front_ele_v4i8(<4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vredand.vs v8, v8, v8, v0.t
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%s = extractelement <4 x i8> %v, i64 0
%r = call i8 @llvm.vp.reduce.and.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl)
@@ -1843,7 +1843,7 @@ define zeroext i8 @front_ele_v32i8(<32 x i8> %v, <32 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v8, v8, v0.t
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%s = extractelement <32 x i8> %v, i64 0
%r = call i8 @llvm.vp.reduce.and.v32i8(i8 %s, <32 x i8> %v, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 5e657a93ec0d6..919c2fd518578 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -24,7 +24,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vmseq.vi v9, v9, 0
; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: andi a3, a0, 255
+; RV32-NEXT: zext.b a3, a0
; RV32-NEXT: mv a0, a2
; RV32-NEXT: bnez a3, .LBB0_1
; RV32-NEXT: # %bb.2: # %if.then381
@@ -51,7 +51,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vmseq.vi v9, v9, 0
; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: andi a3, a0, 255
+; RV64-NEXT: zext.b a3, a0
; RV64-NEXT: mv a0, a2
; RV64-NEXT: bnez a3, .LBB0_1
; RV64-NEXT: # %bb.2: # %if.then381
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 6c68051ff38c3..6bed05dcda154 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -43,7 +43,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_2:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a2, a1, 255
+; RV32-FAST-NEXT: zext.b a2, a1
; RV32-FAST-NEXT: slli a1, a1, 8
; RV32-FAST-NEXT: or a1, a1, a2
; RV32-FAST-NEXT: sh a1, 0(a0)
@@ -51,7 +51,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_2:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a2, a1, 255
+; RV64-FAST-NEXT: zext.b a2, a1
; RV64-FAST-NEXT: slli a1, a1, 8
; RV64-FAST-NEXT: or a1, a1, a2
; RV64-FAST-NEXT: sh a1, 0(a0)
@@ -79,7 +79,7 @@ define void @memset_4(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_4:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -126,7 +126,7 @@ define void @memset_8(ptr %a, i8 %value) nounwind {
;
; RV32-FAST-LABEL: memset_8:
; RV32-FAST: # %bb.0:
-; RV32-FAST-NEXT: andi a1, a1, 255
+; RV32-FAST-NEXT: zext.b a1, a1
; RV32-FAST-NEXT: lui a2, 4112
; RV32-FAST-NEXT: addi a2, a2, 257
; RV32-FAST-NEXT: mul a1, a1, a2
@@ -136,7 +136,7 @@ define void @memset_8(ptr %a, i8 %value) nounwind {
;
; RV64-FAST-LABEL: memset_8:
; RV64-FAST: # %bb.0:
-; RV64-FAST-NEXT: andi a1, a1, 255
+; RV64-FAST-NEXT: zext.b a1, a1
; RV64-FAST-NEXT: lui a2, 4112
; RV64-FAST-NEXT: addiw a2, a2, 257
; RV64-FAST-NEXT: slli a3, a2, 32
@@ -223,7 +223,7 @@ define void @memset_64(ptr %a, i8 %value) nounwind {
define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_2:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a2, a1, 255
+; RV32-BOTH-NEXT: zext.b a2, a1
; RV32-BOTH-NEXT: slli a1, a1, 8
; RV32-BOTH-NEXT: or a1, a1, a2
; RV32-BOTH-NEXT: sh a1, 0(a0)
@@ -231,7 +231,7 @@ define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_2:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a2, a1, 255
+; RV64-BOTH-NEXT: zext.b a2, a1
; RV64-BOTH-NEXT: slli a1, a1, 8
; RV64-BOTH-NEXT: or a1, a1, a2
; RV64-BOTH-NEXT: sh a1, 0(a0)
@@ -243,7 +243,7 @@ define void @aligned_memset_2(ptr align 2 %a, i8 %value) nounwind {
define void @aligned_memset_4(ptr align 4 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_4:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -266,7 +266,7 @@ define void @aligned_memset_4(ptr align 4 %a, i8 %value) nounwind {
define void @aligned_memset_8(ptr align 8 %a, i8 %value) nounwind {
; RV32-BOTH-LABEL: aligned_memset_8:
; RV32-BOTH: # %bb.0:
-; RV32-BOTH-NEXT: andi a1, a1, 255
+; RV32-BOTH-NEXT: zext.b a1, a1
; RV32-BOTH-NEXT: lui a2, 4112
; RV32-BOTH-NEXT: addi a2, a2, 257
; RV32-BOTH-NEXT: mul a1, a1, a2
@@ -276,7 +276,7 @@ define void @aligned_memset_8(ptr align 8 %a, i8 %value) nounwind {
;
; RV64-BOTH-LABEL: aligned_memset_8:
; RV64-BOTH: # %bb.0:
-; RV64-BOTH-NEXT: andi a1, a1, 255
+; RV64-BOTH-NEXT: zext.b a1, a1
; RV64-BOTH-NEXT: lui a2, 4112
; RV64-BOTH-NEXT: addiw a2, a2, 257
; RV64-BOTH-NEXT: slli a3, a2, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
index 8e3cedfbeeb03..3740737ba2989 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vec3-setcc-crash.ll
@@ -30,7 +30,7 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) {
; RV32-NEXT: and a0, a4, a0
; RV32-NEXT: and a2, a5, a2
; RV32-NEXT: slli a3, a3, 8
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: or a0, a0, a3
; RV32-NEXT: sh a0, 0(a1)
; RV32-NEXT: sb a2, 2(a1)
@@ -57,7 +57,7 @@ define void @vec3_setcc_crash(ptr %in, ptr %out) {
; RV64-NEXT: and a0, a4, a0
; RV64-NEXT: and a2, a5, a2
; RV64-NEXT: slli a3, a3, 8
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: or a0, a0, a3
; RV64-NEXT: sh a0, 0(a1)
; RV64-NEXT: sb a2, 2(a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
index 16074250a8351..1df4076aa2069 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
@@ -15,7 +15,7 @@ define i8 @extract_last_i8(<16 x i8> %data, <16 x i8> %mask, i8 %passthru) {
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: vredmaxu.vs v9, v9, v9
; CHECK-NEXT: vmv.x.s a0, v9
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: .LBB0_2:
@@ -38,7 +38,7 @@ define i16 @extract_last_i16(<8 x i16> %data, <8 x i16> %mask, i16 %passthru) {
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: vredmaxu.vs v9, v9, v9
; CHECK-NEXT: vmv.x.s a0, v9
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
@@ -62,7 +62,7 @@ define i32 @extract_last_i32(<4 x i32> %data, <4 x i32> %mask, i32 %passthru) {
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: vredmaxu.vs v9, v9, v9
; CHECK-NEXT: vmv.x.s a0, v9
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
@@ -87,7 +87,7 @@ define i64 @extract_last_i64(<2 x i64> %data, <2 x i64> %mask, i64 %passthru) {
; RV32-NEXT: vid.v v9, v0.t
; RV32-NEXT: vredmaxu.vs v9, v9, v9
; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vx v8, v8, a0
; RV32-NEXT: vmv.x.s a0, v8
@@ -109,7 +109,7 @@ define i64 @extract_last_i64(<2 x i64> %data, <2 x i64> %mask, i64 %passthru) {
; RV64-NEXT: vid.v v9, v0.t
; RV64-NEXT: vredmaxu.vs v9, v9, v9
; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a0
; RV64-NEXT: vmv.x.s a0, v8
@@ -133,7 +133,7 @@ define float @extract_last_float(<4 x float> %data, <4 x i32> %mask, float %pass
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: vredmaxu.vs v9, v9, v9
; CHECK-NEXT: vmv.x.s a0, v9
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -157,7 +157,7 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
; CHECK-NEXT: vid.v v9, v0.t
; CHECK-NEXT: vredmaxu.vs v9, v9, v9
; CHECK-NEXT: vmv.x.s a0, v9
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -180,7 +180,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1>
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: vredmaxu.vs v10, v10, v10
; CHECK-NEXT: vmv.x.s a0, v10
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
@@ -202,7 +202,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: vredmaxu.vs v10, v10, v10
; CHECK-NEXT: vmv.x.s a0, v10
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
@@ -224,7 +224,7 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: vredmaxu.vs v10, v10, v10
; CHECK-NEXT: vmv.x.s a0, v10
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
@@ -247,7 +247,7 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
; RV32-NEXT: vid.v v10, v0.t
; RV32-NEXT: vredmaxu.vs v10, v10, v10
; RV32-NEXT: vmv.x.s a0, v10
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vx v8, v8, a0
; RV32-NEXT: vmv.x.s a0, v8
@@ -268,7 +268,7 @@ define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1
; RV64-NEXT: vid.v v10, v0.t
; RV64-NEXT: vredmaxu.vs v10, v10, v10
; RV64-NEXT: vmv.x.s a0, v10
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a0
; RV64-NEXT: vmv.x.s a0, v8
@@ -290,7 +290,7 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: vredmaxu.vs v10, v10, v10
; CHECK-NEXT: vmv.x.s a0, v10
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -312,7 +312,7 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: vredmaxu.vs v10, v10, v10
; CHECK-NEXT: vmv.x.s a0, v10
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vfmv.f.s fa0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
index 39d73bed25926..27d76bf41912e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -438,7 +438,7 @@ define i64 @vsetvl_e32mf2_and8bits(i64 %avl) {
; CHECK-LABEL: vsetvl_e32mf2_and8bits:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, a0, e32, mf8, ta, ma
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 5)
%b = and i64 %a, 255
@@ -608,7 +608,7 @@ define i64 @vsetvl_e64mf4_and8bits(i64 %avl) {
; CHECK-LABEL: vsetvl_e64mf4_and8bits:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, a0, e64, mf4, ta, ma
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%a = call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 6)
%b = and i64 %a, 255
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
index b2a676dc0daf4..09162b55c7079 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvlmax-ext.ll
@@ -427,7 +427,7 @@ define i64 @vsetvlmax_e32mf2_and8bits() {
; CHECK-LABEL: vsetvlmax_e32mf2_and8bits:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf8, ta, ma
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%a = call i64 @llvm.riscv.vsetvlimax(i64 2, i64 5)
%b = and i64 %a, 255
@@ -597,7 +597,7 @@ define i64 @vsetvlmax_e64mf4_and8bits() {
; CHECK-LABEL: vsetvlmax_e64mf4_and8bits:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, mf4, ta, ma
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: ret
%a = call i64 @llvm.riscv.vsetvlimax(i64 3, i64 6)
%b = and i64 %a, 255
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index 87f2a6306bd60..bdbe4ed216919 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -252,12 +252,12 @@ define i64 @zext_i1_to_i64(i1 %a) nounwind {
define i16 @zext_i8_to_i16(i8 %a) nounwind {
; RV32I-LABEL: zext_i8_to_i16:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_i8_to_i16:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext i8 %a to i16
ret i16 %1
@@ -266,12 +266,12 @@ define i16 @zext_i8_to_i16(i8 %a) nounwind {
define i32 @zext_i8_to_i32(i8 %a) nounwind {
; RV32I-LABEL: zext_i8_to_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_i8_to_i32:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext i8 %a to i32
ret i32 %1
@@ -280,13 +280,13 @@ define i32 @zext_i8_to_i32(i8 %a) nounwind {
define i64 @zext_i8_to_i64(i8 %a) nounwind {
; RV32I-LABEL: zext_i8_to_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_i8_to_i64:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext i8 %a to i64
ret i64 %1
@@ -410,12 +410,12 @@ define i64 @zext_nneg_i1_to_i64(i1 %a) nounwind {
define i16 @zext_nneg_i8_to_i16(i8 %a) nounwind {
; RV32I-LABEL: zext_nneg_i8_to_i16:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_nneg_i8_to_i16:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext nneg i8 %a to i16
ret i16 %1
@@ -424,12 +424,12 @@ define i16 @zext_nneg_i8_to_i16(i8 %a) nounwind {
define i32 @zext_nneg_i8_to_i32(i8 %a) nounwind {
; RV32I-LABEL: zext_nneg_i8_to_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_nneg_i8_to_i32:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext nneg i8 %a to i32
ret i32 %1
@@ -438,13 +438,13 @@ define i32 @zext_nneg_i8_to_i32(i8 %a) nounwind {
define i64 @zext_nneg_i8_to_i64(i8 %a) nounwind {
; RV32I-LABEL: zext_nneg_i8_to_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: ret
;
; RV64-LABEL: zext_nneg_i8_to_i64:
; RV64: # %bb.0:
-; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: zext.b a0, a0
; RV64-NEXT: ret
%1 = zext nneg i8 %a to i64
ret i64 %1
diff --git a/llvm/test/CodeGen/RISCV/simplify-condbr.ll b/llvm/test/CodeGen/RISCV/simplify-condbr.ll
index 3f9a73607103a..6dabd7d93cbc1 100644
--- a/llvm/test/CodeGen/RISCV/simplify-condbr.ll
+++ b/llvm/test/CodeGen/RISCV/simplify-condbr.ll
@@ -20,7 +20,7 @@ define fastcc i32 @S_regrepeat(ptr %startposp, i32 %max, i8 %0, i1 %cmp343) noun
; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
-; CHECK-NEXT: andi a2, a2, 255
+; CHECK-NEXT: zext.b a2, a2
; CHECK-NEXT: addi a4, a2, -19
; CHECK-NEXT: li a5, 2
; CHECK-NEXT: mv a0, a1
diff --git a/llvm/test/CodeGen/RISCV/split-store.ll b/llvm/test/CodeGen/RISCV/split-store.ll
index a9d29f1840cfc..4aab869561a2d 100644
--- a/llvm/test/CodeGen/RISCV/split-store.ll
+++ b/llvm/test/CodeGen/RISCV/split-store.ll
@@ -94,14 +94,14 @@ define void @int16_float_pair(i16 signext %tmp1, float %tmp2, ptr %ref.tmp) {
define void @int8_float_pair(i8 signext %tmp1, float %tmp2, ptr %ref.tmp) {
; RV32-RV64-LABEL: int8_float_pair:
; RV32-RV64: # %bb.0:
-; RV32-RV64-NEXT: andi a0, a0, 255
+; RV32-RV64-NEXT: zext.b a0, a0
; RV32-RV64-NEXT: sw a0, 0(a2)
; RV32-RV64-NEXT: sw a1, 4(a2)
; RV32-RV64-NEXT: ret
;
; RV32D-RV64D-LABEL: int8_float_pair:
; RV32D-RV64D: # %bb.0:
-; RV32D-RV64D-NEXT: andi a0, a0, 255
+; RV32D-RV64D-NEXT: zext.b a0, a0
; RV32D-RV64D-NEXT: sw a0, 0(a1)
; RV32D-RV64D-NEXT: fsw fa0, 4(a1)
; RV32D-RV64D-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
index ae1aabed49805..3007c3574cf78 100644
--- a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -93,7 +93,7 @@ define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %lim
; CHECK-LABEL: overflow_add_no_consts:
; CHECK: # %bb.0:
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: bltu a2, a0, .LBB4_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
@@ -111,7 +111,7 @@ define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
; CHECK-LABEL: overflow_add_const_limit:
; CHECK: # %bb.0:
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: andi a0, a0, 255
+; CHECK-NEXT: zext.b a0, a0
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: bltu a1, a0, .LBB5_2
; CHECK-NEXT: # %bb.1:
@@ -378,7 +378,7 @@ define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
; CHECK-LABEL: underflow_if_sub_signext:
; CHECK: # %bb.0:
; CHECK-NEXT: sext.w a2, a0
-; CHECK-NEXT: andi a1, a1, 255
+; CHECK-NEXT: zext.b a1, a1
; CHECK-NEXT: sgtz a2, a2
; CHECK-NEXT: and a0, a2, a0
; CHECK-NEXT: addi a0, a0, 245
diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
index 409114f8a9612..65b3d763fd72f 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -159,9 +159,9 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV32I-LABEL: func8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: mul a1, a1, a2
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: bltu a0, a1, .LBB3_2
@@ -172,9 +172,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
;
; RV64I-LABEL: func8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: bltu a0, a1, .LBB3_2
@@ -185,9 +185,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
-; RV32IZbb-NEXT: andi a0, a0, 255
+; RV32IZbb-NEXT: zext.b a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
-; RV32IZbb-NEXT: andi a1, a1, 255
+; RV32IZbb-NEXT: zext.b a1, a1
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: li a1, 255
; RV32IZbb-NEXT: minu a0, a0, a1
@@ -195,9 +195,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 255
+; RV64IZbb-NEXT: zext.b a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 255
+; RV64IZbb-NEXT: zext.b a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: li a1, 255
; RV64IZbb-NEXT: minu a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
index b09ff9805eb97..aa42568c539ba 100644
--- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -156,9 +156,9 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV32I-LABEL: func8:
; RV32I: # %bb.0:
-; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: zext.b a0, a0
; RV32I-NEXT: mul a1, a1, a2
-; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: zext.b a1, a1
; RV32I-NEXT: sub a1, a0, a1
; RV32I-NEXT: sltu a0, a0, a1
; RV32I-NEXT: addi a0, a0, -1
@@ -167,9 +167,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
;
; RV64I-LABEL: func8:
; RV64I: # %bb.0:
-; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: zext.b a0, a0
; RV64I-NEXT: mul a1, a1, a2
-; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: zext.b a1, a1
; RV64I-NEXT: sub a1, a0, a1
; RV64I-NEXT: sltu a0, a0, a1
; RV64I-NEXT: addi a0, a0, -1
@@ -178,18 +178,18 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
-; RV32IZbb-NEXT: andi a0, a0, 255
+; RV32IZbb-NEXT: zext.b a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
-; RV32IZbb-NEXT: andi a1, a1, 255
+; RV32IZbb-NEXT: zext.b a1, a1
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
-; RV64IZbb-NEXT: andi a0, a0, 255
+; RV64IZbb-NEXT: zext.b a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
-; RV64IZbb-NEXT: andi a1, a1, 255
+; RV64IZbb-NEXT: zext.b a1, a1
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
index d953d34e2d7b9..449e983fb6b52 100644
--- a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
@@ -325,7 +325,7 @@ define i64 @andnofff(i64 %x) {
; RV32: # %bb.0:
; RV32-NEXT: lui a2, 1044480
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: zext.b a0, a0
; RV32-NEXT: ret
;
; RV64-LABEL: andnofff:
diff --git a/llvm/test/CodeGen/RISCV/zcb-regalloc-hints.ll b/llvm/test/CodeGen/RISCV/zcb-regalloc-hints.ll
index 545d6c6aca041..6b47f0c46caeb 100644
--- a/llvm/test/CodeGen/RISCV/zcb-regalloc-hints.ll
+++ b/llvm/test/CodeGen/RISCV/zcb-regalloc-hints.ll
@@ -52,7 +52,7 @@ define i64 @c_sext_h(i64 %x, i16 %y, i64 %z) {
define i64 @c_zext_b(i64 %x, i8 %y, i64 %z) {
; CHECK-LABEL: c_zext_b:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a1, a1, 255
+; CHECK-NEXT: zext.b a1, a1
; CHECK-NEXT: lui a0, 1
; CHECK-NEXT: or a0, a0, a1
; CHECK-NEXT: ret
diff --git a/llvm/test/MC/RISCV/rv32i-aliases-valid.s b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
index 20deda4ec5eaf..43e29da0a38d8 100644
--- a/llvm/test/MC/RISCV/rv32i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
@@ -254,7 +254,7 @@ sext.b x10, x11
sext.h x10, x11
# CHECK-INST: andi a0, a1, 255
-# CHECK-ALIAS: andi a0, a1, 255
+# CHECK-ALIAS: zext.b a0, a1
zext.b x10, x11
# CHECK-EXPAND: slli a0, a1, 16
diff --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
index dde8dbe43a6ce..b7029225be2ab 100644
--- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
@@ -484,7 +484,7 @@ sext.b x10, x11
sext.h x10, x11
# CHECK-INST: andi a0, a1, 255
-# CHECK-ALIAS: andi a0, a1, 255
+# CHECK-ALIAS: zext.b a0, a1
zext.b x10, x11
# CHECK-EXPAND: slli a0, a1, 48
More information about the llvm-commits
mailing list