[llvm] 9bdcd9b - [llvm][SVE] Addressing mode for FF/NF loads.
Francesco Petrogalli via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 18 05:46:29 PDT 2020
Author: Francesco Petrogalli
Date: 2020-03-18T12:46:07Z
New Revision: 9bdcd9bf4438d8a52ab8142fdac2ad3d99d607e6
URL: https://github.com/llvm/llvm-project/commit/9bdcd9bf4438d8a52ab8142fdac2ad3d99d607e6
DIFF: https://github.com/llvm/llvm-project/commit/9bdcd9bf4438d8a52ab8142fdac2ad3d99d607e6.diff
LOG: [llvm][SVE] Addressing mode for FF/NF loads.
Summary:
This patch adds addressing mode computation for the following SVE
instructions:
* ldff1{s}<T1> { <Zt>.<T2> }, <Pg>/Z, [<Xn|SP>{, <Xm>{, lsl #imm}}]
* ldnf1{s}<T1> { <Zt>.<T2> }, <Pg>/Z, [<Xn|SP>{, #<imm>, mul vl}]
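As an illustration (the two IR functions below mirror tests added in this
patch), a scalar register offset now folds into the reg+reg form of a
first-faulting load, scaled by the element size, and a small VL-scaled
offset folds into the reg+imm (mul vl) form of a non-faulting load:

  ; reg + reg: the offset is folded into the load (lsl #1 for halfwords).
  define <vscale x 8 x i16> @ldff1h_reg(<vscale x 8 x i1> %pg, i16* %a, i64 %offset) {
    %base = getelementptr i16, i16* %a, i64 %offset
    %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
    ; now selects: ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
    ret <vscale x 8 x i16> %load
  }

  ; reg + imm (mul vl): VL-scaled offsets in [-8, 7] fold into the load.
  define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
    %base = bitcast i16* %a to <vscale x 8 x i16>*
    %gep = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 1
    %addr = bitcast <vscale x 8 x i16>* %gep to i16*
    %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %addr)
    ; now selects: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
    ret <vscale x 8 x i16> %load
  }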
Reviewers: andwar, sdesmalen, rengolin, efriedma
Subscribers: tschuett, hiraditya, rkruppe, psnobl, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D76209
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 05675503c96f..bae5c7f4eef8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4509,6 +4509,16 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
return cast<MemSDNode>(Root)->getMemoryVT();
const unsigned Opcode = Root->getOpcode();
+ // For custom ISD nodes, we have to look at them individually to extract the
+ // type of the data moved to/from memory.
+ switch (Opcode) {
+ case AArch64ISD::LDNF1:
+ case AArch64ISD::LDNF1S:
+ return cast<VTSDNode>(Root->getOperand(3))->getVT();
+ default:
+ break;
+ }
+
if (Opcode != ISD::INTRINSIC_VOID)
return EVT();
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index a83e23832ba1..d3a541d0246b 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1538,6 +1538,12 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
defm Pat_Load_P2 : unpred_load_predicate<nxv2i1, LDR_PXI>;
multiclass ldnf1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
+ // scalar + immediate (mul vl)
+ let AddedComplexity = 1 in {
+ def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
+ (I PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+ }
+
// base
def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
(I PPR:$gp, GPR64sp:$base, (i64 0))>;
@@ -1570,40 +1576,45 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
// 16-element contiguous non-faulting loads
defm : ldnf1<LDNF1B_IMM, nxv16i8, AArch64ldnf1, nxv16i1, nxv16i8>;
- multiclass ldff1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
- // Add more complex addressing modes here as required.
+ multiclass ldff1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
+ // reg + reg
+ let AddedComplexity = 1 in {
+ def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
+ (I PPR:$gp, GPR64sp:$base, GPR64:$offset)>;
+ }
+
// Base
def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
(I PPR:$gp, GPR64sp:$base, XZR)>;
}
// 2-element contiguous first faulting loads
- defm : ldff1<LDFF1B_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i8>;
- defm : ldff1<LDFF1SB_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i8>;
- defm : ldff1<LDFF1H_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i16>;
- defm : ldff1<LDFF1SH_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i16>;
- defm : ldff1<LDFF1W_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i32>;
- defm : ldff1<LDFF1SW_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i32>;
- defm : ldff1<LDFF1D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i64>;
- defm : ldff1<LDFF1W_D, nxv2f32, AArch64ldff1, nxv2i1, nxv2f32>;
- defm : ldff1<LDFF1D, nxv2f64, AArch64ldff1, nxv2i1, nxv2f64>;
+ defm : ldff1<LDFF1B_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1SB_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1H_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i16, am_sve_regreg_lsl1>;
+ defm : ldff1<LDFF1SH_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i16, am_sve_regreg_lsl1>;
+ defm : ldff1<LDFF1W_D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i32, am_sve_regreg_lsl2>;
+ defm : ldff1<LDFF1SW_D, nxv2i64, AArch64ldff1s, nxv2i1, nxv2i32, am_sve_regreg_lsl2>;
+ defm : ldff1<LDFF1D, nxv2i64, AArch64ldff1, nxv2i1, nxv2i64, am_sve_regreg_lsl3>;
+ defm : ldff1<LDFF1W_D, nxv2f32, AArch64ldff1, nxv2i1, nxv2f32, am_sve_regreg_lsl2>;
+ defm : ldff1<LDFF1D, nxv2f64, AArch64ldff1, nxv2i1, nxv2f64, am_sve_regreg_lsl3>;
// 4-element contiguous first faulting loads
- defm : ldff1<LDFF1B_S, nxv4i32, AArch64ldff1, nxv4i1, nxv4i8>;
- defm : ldff1<LDFF1SB_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i8>;
- defm : ldff1<LDFF1H_S, nxv4i32, AArch64ldff1, nxv4i1, nxv4i16>;
- defm : ldff1<LDFF1SH_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i16>;
- defm : ldff1<LDFF1W, nxv4i32, AArch64ldff1, nxv4i1, nxv4i32>;
- defm : ldff1<LDFF1W, nxv4f32, AArch64ldff1, nxv4i1, nxv4f32>;
+ defm : ldff1<LDFF1B_S, nxv4i32, AArch64ldff1, nxv4i1, nxv4i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1SB_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1H_S, nxv4i32, AArch64ldff1, nxv4i1, nxv4i16, am_sve_regreg_lsl1>;
+ defm : ldff1<LDFF1SH_S, nxv4i32, AArch64ldff1s, nxv4i1, nxv4i16, am_sve_regreg_lsl1>;
+ defm : ldff1<LDFF1W, nxv4i32, AArch64ldff1, nxv4i1, nxv4i32, am_sve_regreg_lsl2>;
+ defm : ldff1<LDFF1W, nxv4f32, AArch64ldff1, nxv4i1, nxv4f32, am_sve_regreg_lsl2>;
// 8-element contiguous first faulting loads
- defm : ldff1<LDFF1B_H, nxv8i16, AArch64ldff1, nxv8i1, nxv8i8>;
- defm : ldff1<LDFF1SB_H, nxv8i16, AArch64ldff1s, nxv8i1, nxv8i8>;
- defm : ldff1<LDFF1H, nxv8i16, AArch64ldff1, nxv8i1, nxv8i16>;
- defm : ldff1<LDFF1H, nxv8f16, AArch64ldff1, nxv8i1, nxv8f16>;
+ defm : ldff1<LDFF1B_H, nxv8i16, AArch64ldff1, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1SB_H, nxv8i16, AArch64ldff1s, nxv8i1, nxv8i8, am_sve_regreg_lsl0>;
+ defm : ldff1<LDFF1H, nxv8i16, AArch64ldff1, nxv8i1, nxv8i16, am_sve_regreg_lsl1>;
+ defm : ldff1<LDFF1H, nxv8f16, AArch64ldff1, nxv8i1, nxv8f16, am_sve_regreg_lsl1>;
// 16-element contiguous first faulting loads
- defm : ldff1<LDFF1B, nxv16i8, AArch64ldff1, nxv16i1, nxv16i8>;
+ defm : ldff1<LDFF1B, nxv16i8, AArch64ldff1, nxv16i1, nxv16i8, am_sve_regreg_lsl0>;
}
let Predicates = [HasSVE2] in {
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
index 801cf4296282..a27faf3a7220 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
@@ -12,6 +12,15 @@ define <vscale x 16 x i8> @ldff1b(<vscale x 16 x i1> %pg, i8* %a) {
ret <vscale x 16 x i8> %load
}
+define <vscale x 16 x i8> @ldff1b_reg(<vscale x 16 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1b_reg:
+; CHECK: ldff1b { z0.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+ ret <vscale x 16 x i8> %load
+}
+
define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldff1b_h:
; CHECK: ldff1b { z0.h }, p0/z, [x0]
@@ -21,6 +30,16 @@ define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
ret <vscale x 8 x i16> %res
}
+define <vscale x 8 x i16> @ldff1b_h_reg(<vscale x 8 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1b_h_reg:
+; CHECK: ldff1b { z0.h }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
+ %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %res
+}
+
define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldff1b_s:
; CHECK: ldff1b { z0.s }, p0/z, [x0]
@@ -30,6 +49,16 @@ define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, i8* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldff1b_s_reg(<vscale x 4 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1b_s_reg:
+; CHECK: ldff1b { z0.s }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base)
+ %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldff1b_d:
; CHECK: ldff1b { z0.d }, p0/z, [x0]
@@ -39,6 +68,16 @@ define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, i8* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1b_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1b_d_reg:
+; CHECK: ldff1b { z0.d }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base)
+ %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
;
; LDFF1SB
;
@@ -52,6 +91,16 @@ define <vscale x 8 x i16> @ldff1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
ret <vscale x 8 x i16> %res
}
+define <vscale x 8 x i16> @ldff1sb_h_reg(<vscale x 8 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sb_h_reg:
+; CHECK: ldff1sb { z0.h }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
+ %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %res
+}
+
define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldff1sb_s:
; CHECK: ldff1sb { z0.s }, p0/z, [x0]
@@ -61,6 +110,16 @@ define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldff1sb_s_reg(<vscale x 4 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sb_s_reg:
+; CHECK: ldff1sb { z0.s }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base)
+ %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldff1sb_d:
; CHECK: ldff1sb { z0.d }, p0/z, [x0]
@@ -70,6 +129,16 @@ define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1sb_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sb_d_reg:
+; CHECK: ldff1sb { z0.d }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+ %base = getelementptr i8, i8* %a, i64 %offset
+ %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base)
+ %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
;
; LDFF1H
;
@@ -82,6 +151,15 @@ define <vscale x 8 x i16> @ldff1h(<vscale x 8 x i1> %pg, i16* %a) {
ret <vscale x 8 x i16> %load
}
+define <vscale x 8 x i16> @ldff1h_reg(<vscale x 8 x i1> %pg, i16* %a, i64 %offset) {
+; CHECK-LABEL: ldff1h_reg:
+; CHECK: ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %offset
+ %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+ ret <vscale x 8 x i16> %load
+}
+
define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldff1h_s:
; CHECK: ldff1h { z0.s }, p0/z, [x0]
@@ -91,6 +169,16 @@ define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, i16* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldff1h_s_reg(<vscale x 4 x i1> %pg, i16* %a, i64 %offset) {
+; CHECK-LABEL: ldff1h_s_reg:
+; CHECK: ldff1h { z0.s }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %offset
+ %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base)
+ %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldff1h_d:
; CHECK: ldff1h { z0.d }, p0/z, [x0]
@@ -100,6 +188,16 @@ define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, i16* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1h_d_reg(<vscale x 2 x i1> %pg, i16* %a, i64 %offset) {
+; CHECK-LABEL: ldff1h_d_reg:
+; CHECK: ldff1h { z0.d }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %offset
+ %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base)
+ %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, half* %a) {
; CHECK-LABEL: ldff1h_f16:
; CHECK: ldff1h { z0.h }, p0/z, [x0]
@@ -108,6 +206,15 @@ define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, half* %a) {
ret <vscale x 8 x half> %load
}
+define <vscale x 8 x half> @ldff1h_f16_reg(<vscale x 8 x i1> %pg, half* %a, i64 %offset) {
+; CHECK-LABEL: ldff1h_f16_reg:
+; CHECK: ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr half, half* %a, i64 %offset
+ %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+ ret <vscale x 8 x half> %load
+}
+
;
; LDFF1SH
;
@@ -121,6 +228,16 @@ define <vscale x 4 x i32> @ldff1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldff1sh_s_reg(<vscale x 4 x i1> %pg, i16* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sh_s_reg:
+; CHECK: ldff1sh { z0.s }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %offset
+ %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base)
+ %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldff1sh_d:
; CHECK: ldff1sh { z0.d }, p0/z, [x0]
@@ -130,6 +247,16 @@ define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1sh_d_reg(<vscale x 2 x i1> %pg, i16* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sh_d_reg:
+; CHECK: ldff1sh { z0.d }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+ %base = getelementptr i16, i16* %a, i64 %offset
+ %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base)
+ %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
;
; LDFF1W
;
@@ -142,6 +269,15 @@ define <vscale x 4 x i32> @ldff1w(<vscale x 4 x i1> %pg, i32* %a) {
ret <vscale x 4 x i32> %load
}
+define <vscale x 4 x i32> @ldff1w_reg(<vscale x 4 x i1> %pg, i32* %a, i64 %offset) {
+; CHECK-LABEL: ldff1w_reg:
+; CHECK: ldff1w { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr i32, i32* %a, i64 %offset
+ %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+ ret <vscale x 4 x i32> %load
+}
+
define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, i32* %a) {
; CHECK-LABEL: ldff1w_d:
; CHECK: ldff1w { z0.d }, p0/z, [x0]
@@ -151,6 +287,16 @@ define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, i32* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1w_d_reg(<vscale x 2 x i1> %pg, i32* %a, i64 %offset) {
+; CHECK-LABEL: ldff1w_d_reg:
+; CHECK: ldff1w { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr i32, i32* %a, i64 %offset
+ %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base)
+ %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, float* %a) {
; CHECK-LABEL: ldff1w_f32:
; CHECK: ldff1w { z0.s }, p0/z, [x0]
@@ -159,6 +305,15 @@ define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, float* %a) {
ret <vscale x 4 x float> %load
}
+define <vscale x 4 x float> @ldff1w_f32_reg(<vscale x 4 x i1> %pg, float* %a, i64 %offset) {
+; CHECK-LABEL: ldff1w_f32_reg:
+; CHECK: ldff1w { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr float, float* %a, i64 %offset
+ %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+ ret <vscale x 4 x float> %load
+}
+
define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, float* %a) {
; CHECK-LABEL: ldff1w_2f32:
; CHECK: ldff1w { z0.d }, p0/z, [x0]
@@ -167,6 +322,15 @@ define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, float* %a) {
ret <vscale x 2 x float> %load
}
+define <vscale x 2 x float> @ldff1w_2f32_reg(<vscale x 2 x i1> %pg, float* %a, i64 %offset) {
+; CHECK-LABEL: ldff1w_2f32_reg:
+; CHECK: ldff1w { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr float, float* %a, i64 %offset
+ %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, float* %base)
+ ret <vscale x 2 x float> %load
+}
+
;
; LDFF1SW
;
@@ -180,6 +344,16 @@ define <vscale x 2 x i64> @ldff1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldff1sw_d_reg(<vscale x 2 x i1> %pg, i32* %a, i64 %offset) {
+; CHECK-LABEL: ldff1sw_d_reg:
+; CHECK: ldff1sw { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+ %base = getelementptr i32, i32* %a, i64 %offset
+ %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base)
+ %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
;
; LDFF1D
;
@@ -192,6 +366,15 @@ define <vscale x 2 x i64> @ldff1d(<vscale x 2 x i1> %pg, i64* %a) {
ret <vscale x 2 x i64> %load
}
+define <vscale x 2 x i64> @ldff1d_reg(<vscale x 2 x i1> %pg, i64* %a, i64 %offset) {
+; CHECK-LABEL: ldff1d_reg:
+; CHECK: ldff1d { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+ %base = getelementptr i64, i64* %a, i64 %offset
+ %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+ ret <vscale x 2 x i64> %load
+}
+
define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, double* %a) {
; CHECK-LABEL: ldff1d_f64:
@@ -201,6 +384,15 @@ define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, double* %a) {
ret <vscale x 2 x double> %load
}
+define <vscale x 2 x double> @ldff1d_f64_reg(<vscale x 2 x i1> %pg, double* %a, i64 %offset) {
+; CHECK-LABEL: ldff1d_f64_reg:
+; CHECK: ldff1d { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+ %base = getelementptr double, double* %a, i64 %offset
+ %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+ ret <vscale x 2 x double> %load
+}
+
declare <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
index 162ade5aca4d..917d7ccab69e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -1,5 +1,9 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; Range testing for the immediate in the reg+imm (mul vl) addressing
+; mode is done for one instruction only. The remaining instructions
+; test a single in-range immediate value.
+
define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1b:
; CHECK: ldnf1b { z0.b }, p0/z, [x0]
@@ -8,6 +12,65 @@ define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
ret <vscale x 16 x i8> %load
}
+define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_out_of_lower_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
+; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x[[BASE]]]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+ %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
+ %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+ ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_lower_bound:
+; CHECK: ldnf1b { z0.b }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+ %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
+ %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+ ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_inbound:
+; CHECK: ldnf1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+ %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
+ %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+ ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_upper_bound:
+; CHECK: ldnf1b { z0.b }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+ %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+ ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_out_of_upper_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
+; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
+; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x[[BASE]]]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+ %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
+ %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+ %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+ ret <vscale x 16 x i8> %load
+}
+
define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1b_h:
; CHECK: ldnf1b { z0.h }, p0/z, [x0]
@@ -17,6 +80,18 @@ define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
ret <vscale x 8 x i16> %res
}
+define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_h_inbound:
+; CHECK: ldnf1b { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
+ %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
+ %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+ %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %res
+}
+
define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1sb_h:
; CHECK: ldnf1sb { z0.h }, p0/z, [x0]
@@ -26,6 +101,18 @@ define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
ret <vscale x 8 x i16> %res
}
+define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_h_inbound:
+; CHECK: ldnf1sb { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
+ %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
+ %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+ %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %res
+}
+
define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldnf1h:
; CHECK: ldnf1h { z0.h }, p0/z, [x0]
@@ -34,6 +121,17 @@ define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {
ret <vscale x 8 x i16> %load
}
+define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h_inbound:
+; CHECK: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
+ %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
+ %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
+ %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base_scalar)
+ ret <vscale x 8 x i16> %load
+}
+
define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {
; CHECK-LABEL: ldnf1h_f16:
; CHECK: ldnf1h { z0.h }, p0/z, [x0]
@@ -42,6 +140,17 @@ define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {
ret <vscale x 8 x half> %load
}
+define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
+; CHECK-LABEL: ldnf1h_f16_inbound:
+; CHECK: ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast half* %a to <vscale x 8 x half>*
+ %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
+ %base_scalar = bitcast <vscale x 8 x half>* %base to half*
+ %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, half* %base_scalar)
+ ret <vscale x 8 x half> %load
+}
+
define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1b_s:
; CHECK: ldnf1b { z0.s }, p0/z, [x0]
@@ -51,6 +160,18 @@ define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_s_inbound:
+; CHECK: ldnf1b { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+ %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+ %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+ %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1sb_s:
; CHECK: ldnf1sb { z0.s }, p0/z, [x0]
@@ -60,6 +181,18 @@ define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_s_inbound:
+; CHECK: ldnf1sb { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+ %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+ %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+ %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldnf1h_s:
; CHECK: ldnf1h { z0.s }, p0/z, [x0]
@@ -69,6 +202,18 @@ define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h_s_inbound:
+; CHECK: ldnf1h { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
+ %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
+ %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+ %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldnf1sh_s:
; CHECK: ldnf1sh { z0.s }, p0/z, [x0]
@@ -78,6 +223,18 @@ define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
ret <vscale x 4 x i32> %res
}
+define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1sh_s_inbound:
+; CHECK: ldnf1sh { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
+ %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
+ %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+ %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %res
+}
+
define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {
; CHECK-LABEL: ldnf1w:
; CHECK: ldnf1w { z0.s }, p0/z, [x0]
@@ -86,6 +243,17 @@ define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {
ret <vscale x 4 x i32> %load
}
+define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1w_inbound:
+; CHECK: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
+ %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
+ %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base_scalar)
+ ret <vscale x 4 x i32> %load
+}
+
define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {
; CHECK-LABEL: ldnf1w_f32:
; CHECK: ldnf1w { z0.s }, p0/z, [x0]
@@ -94,6 +262,17 @@ define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {
ret <vscale x 4 x float> %load
}
+define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
+; CHECK-LABEL: ldnf1w_f32_inbound:
+; CHECK: ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast float* %a to <vscale x 4 x float>*
+ %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 4 x float>* %base to float*
+ %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, float* %base_scalar)
+ ret <vscale x 4 x float> %load
+}
+
define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1b_d:
; CHECK: ldnf1b { z0.d }, p0/z, [x0]
@@ -103,6 +282,18 @@ define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1b_d_inbound:
+; CHECK: ldnf1b { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
+ %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
+ %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+ %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: ldnf1sb_d:
; CHECK: ldnf1sb { z0.d }, p0/z, [x0]
@@ -112,6 +303,18 @@ define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ldnf1sb_d_inbound:
+; CHECK: ldnf1sb { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
+ %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
+ %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+ %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldnf1h_d:
; CHECK: ldnf1h { z0.d }, p0/z, [x0]
@@ -121,6 +324,18 @@ define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1h_d_inbound:
+; CHECK: ldnf1h { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
+ %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
+ %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+ %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: ldnf1sh_d:
; CHECK: ldnf1sh { z0.d }, p0/z, [x0]
@@ -130,6 +345,18 @@ define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ldnf1sh_d_inbound:
+; CHECK: ldnf1sh { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
+ %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
+ %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+ %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {
; CHECK-LABEL: ldnf1w_d:
; CHECK: ldnf1w { z0.d }, p0/z, [x0]
@@ -139,6 +366,18 @@ define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1w_d_inbound:
+; CHECK: ldnf1w { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
+ %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
+ %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+ %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
; CHECK-LABEL: ldnf1sw_d:
; CHECK: ldnf1sw { z0.d }, p0/z, [x0]
@@ -148,6 +387,18 @@ define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
ret <vscale x 2 x i64> %res
}
+define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ldnf1sw_d_inbound:
+; CHECK: ldnf1sw { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
+ %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
+ %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
+ %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+ %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %res
+}
+
define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {
; CHECK-LABEL: ldnf1d:
; CHECK: ldnf1d { z0.d }, p0/z, [x0]
@@ -156,6 +407,17 @@ define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {
ret <vscale x 2 x i64> %load
}
+define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
+; CHECK-LABEL: ldnf1d_inbound:
+; CHECK: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
+ %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
+ %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
+ %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base_scalar)
+ ret <vscale x 2 x i64> %load
+}
+
define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {
; CHECK-LABEL: ldnf1d_f64:
; CHECK: ldnf1d { z0.d }, p0/z, [x0]
@@ -164,6 +426,17 @@ define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {
ret <vscale x 2 x double> %load
}
+define <vscale x 2 x double> @ldnf1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
+; CHECK-LABEL: ldnf1d_f64_inbound:
+; CHECK: ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %base_scalable = bitcast double* %a to <vscale x 2 x double>*
+ %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1
+ %base_scalar = bitcast <vscale x 2 x double>* %base to double*
+ %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, double* %base_scalar)
+ ret <vscale x 2 x double> %load
+}
+
declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, i8*)