[llvm] c58be85 - [SVE] Prefer zero-extending loads when lowering ISD::EXTLOAD.
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 10 06:33:54 PST 2022
Author: Paul Walker
Date: 2022-02-10T14:30:28Z
New Revision: c58be8572001253870e0c268585cb1322dcaa74b
URL: https://github.com/llvm/llvm-project/commit/c58be8572001253870e0c268585cb1322dcaa74b
DIFF: https://github.com/llvm/llvm-project/commit/c58be8572001253870e0c268585cb1322dcaa74b.diff
LOG: [SVE] Prefer zero-extending loads when lowering ISD::EXTLOAD.
The decision is perhaps arbitrary, but I figure zeroing has no
dependency on the value being loaded, whereas sign extension
depends on the sign bit of each loaded element.
Differential Revision: https://reviews.llvm.org/D119327
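
To make the affected case concrete, here is a minimal IR sketch of an
ISD::EXTLOAD, adapted from the sve-masked-ldst-nonext.ll update below
(the function name is illustrative): the i16 lanes must be promoted to
64-bit elements, but neither extension is semantically required, so the
backend is free to choose and now picks the zero-extending form.

define <vscale x 2 x i16> @promoted_masked_load(<vscale x 2 x i16>* %in, <vscale x 2 x i1> %mask) {
  ; Lanes are promoted to 64 bits with "don't care" top bits; with this
  ; change ld1h { z0.d } is selected instead of the sign-extending ld1sh.
  %v = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)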
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64InstrInfo.td
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
llvm/test/CodeGen/AArch64/sve-split-load.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d31a0e2a88cb2..41c042cac69a3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4604,7 +4604,6 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
bool IdxNeedsExtend =
getGatherScatterIndexIsExtended(Index) ||
Index.getSimpleValueType().getVectorElementType() == MVT::i32;
- bool ResNeedsSignExtend = ExtTy == ISD::EXTLOAD || ExtTy == ISD::SEXTLOAD;
EVT VT = PassThru.getSimpleValueType();
EVT IndexVT = Index.getSimpleValueType();
@@ -4652,7 +4651,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
selectGatherScatterAddrMode(BasePtr, Index, MemVT, Opcode,
/*isGather=*/true, DAG);
- if (ResNeedsSignExtend)
+ if (ExtTy == ISD::SEXTLOAD)
Opcode = getSignExtendedGatherOpcode(Opcode);
if (IsFixedLength) {
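
The gather path previously routed both ISD::EXTLOAD and ISD::SEXTLOAD to
the sign-extended gather opcodes; after this change only ISD::SEXTLOAD
takes that path. A sketch mirroring the masked_gather_nxv2i8 test
updated below (the function name is illustrative):

define <vscale x 2 x i8> @anyext_gather(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) {
  ; The i8 lanes live in 64-bit elements with undefined top bits, so this
  ; now selects ld1b { z0.d }, p0/z, [z0.d] rather than ld1sb.
  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  ret <vscale x 2 x i8> %data
}
declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)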
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 42c1afcff6a90..788043b916f0e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -350,49 +350,49 @@ def nonext_masked_load :
cast<MaskedLoadSDNode>(N)->isUnindexed() &&
!cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
-// sign extending masked load fragments.
-def asext_masked_load :
+// Any/Zero extending masked load fragments.
+def azext_masked_load :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
(masked_ld node:$ptr, undef, node:$pred, node:$def),[{
return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
- cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
+ cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD) &&
cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
-def asext_masked_load_i8 :
+def azext_masked_load_i8 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
-def asext_masked_load_i16 :
+def azext_masked_load_i16 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
-def asext_masked_load_i32 :
+def azext_masked_load_i32 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (asext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (azext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
-// zero extending masked load fragments.
-def zext_masked_load :
+// Sign extending masked load fragments.
+def sext_masked_load :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
(masked_ld node:$ptr, undef, node:$pred, node:$def), [{
- return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
+ return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD &&
cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
-def zext_masked_load_i8 :
+def sext_masked_load_i8 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
-def zext_masked_load_i16 :
+def sext_masked_load_i16 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
-def zext_masked_load_i32 :
+def sext_masked_load_i32 :
PatFrag<(ops node:$ptr, node:$pred, node:$def),
- (zext_masked_load node:$ptr, node:$pred, node:$def), [{
+ (sext_masked_load node:$ptr, node:$pred, node:$def), [{
return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
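
The renamed fragments leave explicit extensions untouched:
azext_masked_load now covers EXTLOAD and ZEXTLOAD (selecting the
unsigned LD1* forms), while sext_masked_load covers only SEXTLOAD
(the LD1S* forms). A hedged sketch of the two explicit cases, which are
unchanged by this patch (function names illustrative; this assumes the
usual DAG combine of extend-of-masked-load into an extending masked
load):

define <vscale x 2 x i64> @explicit_sext(<vscale x 2 x i8>* %in, <vscale x 2 x i1> %mask) {
  ; Sign extension is semantically required (ISD::SEXTLOAD), so the
  ; sext_masked_load_i8 fragment still selects ld1sb { z0.d }.
  %v = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %in, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %e = sext <vscale x 2 x i8> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %e
}

define <vscale x 2 x i64> @explicit_zext(<vscale x 2 x i8>* %in, <vscale x 2 x i1> %mask) {
  ; Zero extension (ISD::ZEXTLOAD) matches azext_masked_load_i8 and
  ; selects ld1b { z0.d }, as before.
  %v = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %in, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %e = zext <vscale x 2 x i8> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %e
}

declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)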
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 6d5f56006a2fd..06486a66de20b 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2150,12 +2150,12 @@ let Predicates = [HasSVEorStreamingSVE] in {
}
// 2-element contiguous loads
- defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i8, LD1B_D, LD1B_D_IMM, am_sve_regreg_lsl0>;
- defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i8, LD1SB_D, LD1SB_D_IMM, am_sve_regreg_lsl0>;
- defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i16, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
- defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i16, LD1SH_D, LD1SH_D_IMM, am_sve_regreg_lsl1>;
- defm : pred_load<nxv2i64, nxv2i1, zext_masked_load_i32, LD1W_D, LD1W_D_IMM, am_sve_regreg_lsl2>;
- defm : pred_load<nxv2i64, nxv2i1, asext_masked_load_i32, LD1SW_D, LD1SW_D_IMM, am_sve_regreg_lsl2>;
+ defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i8, LD1B_D, LD1B_D_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i8, LD1SB_D, LD1SB_D_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i16, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
+ defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i16, LD1SH_D, LD1SH_D_IMM, am_sve_regreg_lsl1>;
+ defm : pred_load<nxv2i64, nxv2i1, azext_masked_load_i32, LD1W_D, LD1W_D_IMM, am_sve_regreg_lsl2>;
+ defm : pred_load<nxv2i64, nxv2i1, sext_masked_load_i32, LD1SW_D, LD1SW_D_IMM, am_sve_regreg_lsl2>;
defm : pred_load<nxv2i64, nxv2i1, nonext_masked_load, LD1D, LD1D_IMM, am_sve_regreg_lsl3>;
defm : pred_load<nxv2f16, nxv2i1, nonext_masked_load, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv2bf16, nxv2i1, nonext_masked_load, LD1H_D, LD1H_D_IMM, am_sve_regreg_lsl1>;
@@ -2163,18 +2163,18 @@ let Predicates = [HasSVEorStreamingSVE] in {
defm : pred_load<nxv2f64, nxv2i1, nonext_masked_load, LD1D, LD1D_IMM, am_sve_regreg_lsl3>;
// 4-element contiguous loads
- defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i8, LD1B_S, LD1B_S_IMM, am_sve_regreg_lsl0>;
- defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i8, LD1SB_S, LD1SB_S_IMM, am_sve_regreg_lsl0>;
- defm : pred_load<nxv4i32, nxv4i1, zext_masked_load_i16, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
- defm : pred_load<nxv4i32, nxv4i1, asext_masked_load_i16, LD1SH_S, LD1SH_S_IMM, am_sve_regreg_lsl1>;
+ defm : pred_load<nxv4i32, nxv4i1, azext_masked_load_i8, LD1B_S, LD1B_S_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv4i32, nxv4i1, sext_masked_load_i8, LD1SB_S, LD1SB_S_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv4i32, nxv4i1, azext_masked_load_i16, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
+ defm : pred_load<nxv4i32, nxv4i1, sext_masked_load_i16, LD1SH_S, LD1SH_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4i32, nxv4i1, nonext_masked_load, LD1W, LD1W_IMM, am_sve_regreg_lsl2>;
defm : pred_load<nxv4f16, nxv4i1, nonext_masked_load, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4bf16, nxv4i1, nonext_masked_load, LD1H_S, LD1H_S_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv4f32, nxv4i1, nonext_masked_load, LD1W, LD1W_IMM, am_sve_regreg_lsl2>;
// 8-element contiguous loads
- defm : pred_load<nxv8i16, nxv8i1, zext_masked_load_i8, LD1B_H, LD1B_H_IMM, am_sve_regreg_lsl0>;
- defm : pred_load<nxv8i16, nxv8i1, asext_masked_load_i8, LD1SB_H, LD1SB_H_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv8i16, nxv8i1, azext_masked_load_i8, LD1B_H, LD1B_H_IMM, am_sve_regreg_lsl0>;
+ defm : pred_load<nxv8i16, nxv8i1, sext_masked_load_i8, LD1SB_H, LD1SB_H_IMM, am_sve_regreg_lsl0>;
defm : pred_load<nxv8i16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv8f16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
defm : pred_load<nxv8bf16, nxv8i1, nonext_masked_load, LD1H, LD1H_IMM, am_sve_regreg_lsl1>;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
index 12a1a0088fe19..51342d80ce565 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -56,7 +56,7 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
; CHECK-LABEL: fcvt_v8f16_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s, vl8
-; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.s, p0/m, z0.h
; CHECK-NEXT: st1w { z0.s }, p0, [x1]
; CHECK-NEXT: ret
@@ -72,8 +72,8 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #8
; VBITS_EQ_256-NEXT: ptrue p0.s, vl8
-; VBITS_EQ_256-NEXT: ld1sh { z0.s }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1h { z0.s }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z1.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_EQ_256-NEXT: fcvt z1.s, p0/m, z1.h
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
@@ -83,7 +83,7 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v16f16_v16f32:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.s, vl16
-; VBITS_GE_512-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -98,7 +98,7 @@ define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v32f16_v32f32:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.s, vl32
-; VBITS_GE_1024-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_1024-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -112,7 +112,7 @@ define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v64f16_v64f32:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.s, vl64
-; VBITS_GE_2048-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1h { z0.s }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.s, p0/m, z0.h
; VBITS_GE_2048-NEXT: st1w { z0.s }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
@@ -161,7 +161,7 @@ define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
; CHECK-LABEL: fcvt_v4f16_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.h
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
@@ -176,8 +176,8 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #4
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
-; VBITS_EQ_256-NEXT: ld1sh { z0.d }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1sh { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z1.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_EQ_256-NEXT: fcvt z1.d, p0/m, z1.h
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
@@ -187,7 +187,7 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v8f16_v8f64:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.d, vl8
-; VBITS_GE_512-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -202,7 +202,7 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v16f16_v16f64:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.d, vl16
-; VBITS_GE_1024-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -216,7 +216,7 @@ define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v32f16_v32f64:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.d, vl32
-; VBITS_GE_2048-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1h { z0.d }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.h
; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
@@ -262,7 +262,7 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
; CHECK-LABEL: fcvt_v4f32_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: fcvt z0.d, p0/m, z0.s
; CHECK-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-NEXT: ret
@@ -278,8 +278,8 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
; VBITS_EQ_256: // %bb.0:
; VBITS_EQ_256-NEXT: mov x8, #4
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
-; VBITS_EQ_256-NEXT: ld1sw { z0.d }, p0/z, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: ld1sw { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT: ld1w { z1.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_EQ_256-NEXT: fcvt z1.d, p0/m, z1.s
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
@@ -289,7 +289,7 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
; VBITS_GE_512-LABEL: fcvt_v8f32_v8f64:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.d, vl8
-; VBITS_GE_512-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_512-NEXT: ret
@@ -303,7 +303,7 @@ define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 {
; VBITS_GE_1024-LABEL: fcvt_v16f32_v16f64:
; VBITS_GE_1024: // %bb.0:
; VBITS_GE_1024-NEXT: ptrue p0.d, vl16
-; VBITS_GE_1024-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_1024-NEXT: ret
@@ -317,7 +317,7 @@ define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 {
; VBITS_GE_2048-LABEL: fcvt_v32f32_v32f64:
; VBITS_GE_2048: // %bb.0:
; VBITS_GE_2048-NEXT: ptrue p0.d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT: ld1w { z0.d }, p0/z, [x0]
; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.s
; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1]
; VBITS_GE_2048-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
index 6d7609d244a3c..8e8500348be07 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-mask-opt.ll
@@ -25,7 +25,7 @@ define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1b { z0.s }, p0, [x0]
@@ -41,7 +41,7 @@ define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d, vl4
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x1]
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.h, vl4
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -60,8 +60,8 @@ define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
; VBITS_EQ_256-NEXT: ptrue p0.d, vl4
; VBITS_EQ_256-NEXT: ld1d { z0.d }, p0/z, [x1, x8, lsl #3]
; VBITS_EQ_256-NEXT: ld1d { z1.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
-; VBITS_EQ_256-NEXT: ld1sb { z1.d }, p0/z, [z1.d]
+; VBITS_EQ_256-NEXT: ld1b { z0.d }, p0/z, [z0.d]
+; VBITS_EQ_256-NEXT: ld1b { z1.d }, p0/z, [z1.d]
; VBITS_EQ_256-NEXT: uzp1 z0.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: uzp1 z1.s, z1.s, z1.s
; VBITS_EQ_256-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -134,7 +134,7 @@ define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x1]
; CHECK-NEXT: ptrue p0.d, vl2
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1h { z0.s }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 95ae416573b8c..b79db68bd3db9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -36,7 +36,7 @@ define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
; CHECK-NEXT: cmeq v0.2s, v0.2s, #0
; CHECK-NEXT: sshll v0.2d, v0.2s, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1b { z0.s }, p0, [x0]
@@ -60,7 +60,7 @@ define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
; CHECK-NEXT: sunpklo z0.s, z0.h
; CHECK-NEXT: sunpklo z0.d, z0.s
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.h, vl4
; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -96,8 +96,8 @@ define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
; VBITS_EQ_256-NEXT: sunpklo z0.d, z0.s
; VBITS_EQ_256-NEXT: cmpne p1.d, p0/z, z1.d, #0
; VBITS_EQ_256-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; VBITS_EQ_256-NEXT: ld1sb { z0.d }, p1/z, [z2.d]
-; VBITS_EQ_256-NEXT: ld1sb { z1.d }, p0/z, [z3.d]
+; VBITS_EQ_256-NEXT: ld1b { z0.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT: ld1b { z1.d }, p0/z, [z3.d]
; VBITS_EQ_256-NEXT: uzp1 z0.s, z0.s, z0.s
; VBITS_EQ_256-NEXT: uzp1 z1.s, z1.s, z1.s
; VBITS_EQ_256-NEXT: uzp1 z0.h, z0.h, z0.h
@@ -196,7 +196,7 @@ define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
; CHECK-NEXT: cmeq v0.2s, v0.2s, #0
; CHECK-NEXT: sshll v0.2d, v0.2s, #0
; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [z1.d]
; CHECK-NEXT: ptrue p0.s, vl2
; CHECK-NEXT: xtn v0.2s, v0.2d
; CHECK-NEXT: st1h { z0.s }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index 7da4240052211..dd5a1264f8700 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -50,7 +50,7 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(i8* %base, i64 %offset, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: index z0.s, #0, #1
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x8, z0.s, sxtw]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
%splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
%splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -209,7 +209,7 @@ define <vscale x 4 x i8> @gather_8i8_index_offset_8([8 x i8]* %base, i64 %offset
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, x1, lsl #3
; CHECK-NEXT: index z0.s, #0, #8
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x8, z0.s, sxtw]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT: ret
%t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
%t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index 58438e4fb457c..8244b5fa805b2 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -9,7 +9,7 @@ target triple = "aarch64-linux-gnu"
define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
; CHECK-LABEL: masked_sgather_sext:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: movprfx z2, z0
; CHECK-NEXT: sxtb z2.d, p0/m, z0.d
@@ -29,7 +29,7 @@ define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %of
define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
; CHECK-LABEL: masked_sgather_zext:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add z1.d, z0.d, z1.d
; CHECK-NEXT: and z0.d, z0.d, #0xff
@@ -51,7 +51,7 @@ define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %of
define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
%data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
ret <vscale x 2 x i8> %data
@@ -61,7 +61,7 @@ define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale
define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
%data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
ret <vscale x 2 x i16> %data
@@ -71,7 +71,7 @@ define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vsc
define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ret
%data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
ret <vscale x 2 x i32> %data
@@ -181,8 +181,8 @@ define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %
; CHECK-NEXT: punpkhi p2.h, p1.b
; CHECK-NEXT: sunpklo z1.s, z1.h
; CHECK-NEXT: punpklo p1.h, p1.b
-; CHECK-NEXT: ld1sb { z2.s }, p2/z, [x0, z2.s, sxtw]
-; CHECK-NEXT: ld1sb { z1.s }, p1/z, [x0, z1.s, sxtw]
+; CHECK-NEXT: ld1b { z2.s }, p2/z, [x0, z2.s, sxtw]
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, z1.s, sxtw]
; CHECK-NEXT: sunpklo z0.h, z0.b
; CHECK-NEXT: punpklo p0.h, p0.b
; CHECK-NEXT: punpkhi p1.h, p0.b
@@ -190,8 +190,8 @@ define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %
; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
; CHECK-NEXT: sunpkhi z2.s, z0.h
; CHECK-NEXT: sunpklo z0.s, z0.h
-; CHECK-NEXT: ld1sb { z2.s }, p1/z, [x0, z2.s, sxtw]
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT: ld1b { z2.s }, p1/z, [x0, z2.s, sxtw]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h
; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
; CHECK-NEXT: ret
@@ -239,8 +239,8 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vsca
; CHECK: // %bb.0:
; CHECK-NEXT: punpkhi p1.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
-; CHECK-NEXT: ld1sb { z1.d }, p1/z, [z1.d]
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT: ld1b { z1.d }, p1/z, [z1.d]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d]
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: sxtb z0.s, p0/m, z0.s
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
index 70b4f5b1b0534..5c4e744ba108e 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
@@ -120,7 +120,7 @@ define <vscale x 4 x i32> @masked_load_passthru(<vscale x 4 x i32> *%a, <vscale
; Masked load requires promotion
define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* noalias %in, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_load_nxv2i16
-; CHECK: ld1sh { z0.d }, p0/z, [x0]
+; CHECK: ld1h { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
ret <vscale x 2 x i16> %wide.load
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
index 7909efdcadc00..c85779b99b048 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -32,7 +32,7 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas
define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT: st1b { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -8
@@ -51,7 +51,7 @@ define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1>
define void @test_masked_ldst_sv2i16(<vscale x 2 x i16> * %base, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT: st1h { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 -8
@@ -71,7 +71,7 @@ define void @test_masked_ldst_sv2i16(<vscale x 2 x i16> * %base, <vscale x 2 x i
define void @test_masked_ldst_sv2i32(<vscale x 2 x i32> * %base, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, #-8, mul vl]
; CHECK-NEXT: st1w { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -8
@@ -300,7 +300,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, <vscal
define void @test_masked_ldst_sv4i8(<vscale x 4 x i8> * %base, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv4i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #-1, mul vl]
; CHECK-NEXT: st1b { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -1
@@ -319,7 +319,7 @@ define void @test_masked_ldst_sv4i8(<vscale x 4 x i8> * %base, <vscale x 4 x i1>
define void @test_masked_ldst_sv4i16(<vscale x 4 x i16> * %base, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv4i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, #-1, mul vl]
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, #-1, mul vl]
; CHECK-NEXT: st1h { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 -1
@@ -486,7 +486,7 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, <vscal
define void @test_masked_ldst_sv8i8(<vscale x 8 x i8> * %base, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: test_masked_ldst_sv8i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT: st1b { z0.h }, p0, [x0, #7, mul vl]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 6
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
index 8186bf472c6f6..a8bfe75bac6e2 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
@@ -4,7 +4,7 @@
define void @test_masked_ldst_sv2i8(i8 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1]
+; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1]
; CHECK-NEXT: st1b { z[[DATA]].d }, p0, [x0, x1]
; CHECK-NEXT: ret
%base_i8 = getelementptr i8, i8* %base, i64 %offset
@@ -22,7 +22,7 @@ define void @test_masked_ldst_sv2i8(i8 * %base, <vscale x 2 x i1> %mask, i64 %of
define void @test_masked_ldst_sv2i16(i16 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i16:
-; CHECK-NEXT: ld1sh { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
%base_i16 = getelementptr i16, i16* %base, i64 %offset
@@ -40,7 +40,7 @@ define void @test_masked_ldst_sv2i16(i16 * %base, <vscale x 2 x i1> %mask, i64 %
define void @test_masked_ldst_sv2i32(i32 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv2i32:
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT: st1w { z0.d }, p0, [x0, x1, lsl #2]
; CHECK-NEXT: ret
%base_i32 = getelementptr i32, i32* %base, i64 %offset
@@ -263,7 +263,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, i32 *%
define void @test_masked_ldst_sv4i8(i8 * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv4i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1]
+; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1]
; CHECK-NEXT: st1b { z[[DATA]].s }, p0, [x0, x1]
; CHECK-NEXT: ret
%base_i8 = getelementptr i8, i8* %base, i64 %offset
@@ -281,7 +281,7 @@ define void @test_masked_ldst_sv4i8(i8 * %base, <vscale x 4 x i1> %mask, i64 %of
define void @test_masked_ldst_sv4i16(i16 * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv4i16:
-; CHECK-NEXT: ld1sh { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #1]
; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
%base_i16 = getelementptr i16, i16* %base, i64 %offset
@@ -443,7 +443,7 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, i16 *%
define void @test_masked_ldst_sv8i8(i8 * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
; CHECK-LABEL: test_masked_ldst_sv8i8:
-; CHECK-NEXT: ld1sb { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1]
+; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1]
; CHECK-NEXT: st1b { z[[DATA]].h }, p0, [x0, x1]
; CHECK-NEXT: ret
%base_i8 = getelementptr i8, i8* %base, i64 %offset
diff --git a/llvm/test/CodeGen/AArch64/sve-split-load.ll b/llvm/test/CodeGen/AArch64/sve-split-load.ll
index 63d47ea45445c..50f6bff8f4670 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-load.ll
@@ -71,7 +71,7 @@ define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
define <vscale x 2 x i32> @masked_load_promote_2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: masked_load_promote_2i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
ret <vscale x 2 x i32> %load