[llvm] fa58aa8 - [SVE2p1][SME2] Add scalar addressing mode for LD1
Matt Devereau via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 12 06:08:51 PDT 2023
Author: Matt Devereau
Date: 2023-07-12T13:08:38Z
New Revision: fa58aa8e91146f30878f20174b74912b4e115256
URL: https://github.com/llvm/llvm-project/commit/fa58aa8e91146f30878f20174b74912b4e115256
DIFF: https://github.com/llvm/llvm-project/commit/fa58aa8e91146f30878f20174b74912b4e115256.diff
LOG: [SVE2p1][SME2] Add scalar addressing mode for LD1
Add the scalar-plus-scalar addressing mode for the multi-vector LD1 (and LDNT1)
instructions. Previously only the simplest addressing mode was selected: a base
register with a zero immediate offset.
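For example, a multi-vector load whose pointer operand is a base plus a scaled
index, as in the IR sketch below (distilled from the tests added in this patch;
the function name is illustrative), now selects the register-register form of
the instruction rather than computing the address with a separate add and then
using the base + 0-immediate form:

define { <vscale x 8 x i16>, <vscale x 8 x i16> } @example(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) {
  ; %base = %ptr + %index * sizeof(i16): a base register plus a scaled index
  %base = getelementptr i16, ptr %ptr, i64 %index
  ; Now selected as: ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount"), ptr)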
Differential Revision: https://reviews.llvm.org/D154829
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7bb7480b8a2a01..c2005c6ff84778 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -371,8 +371,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
unsigned Opc_rr, unsigned Opc_ri,
bool IsIntr = false);
void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
- unsigned Scale, unsigned Opc_rr,
- unsigned Opc_ri);
+ unsigned Scale, unsigned Opc_ri,
+ unsigned Opc_rr);
void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
bool IsZmMulti, unsigned Opcode,
bool HasPred = false);
@@ -1792,10 +1792,12 @@ void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
- // Use simplest addressing mode for now - base + 0 offset
SDValue PNg = N->getOperand(2);
SDValue Base = N->getOperand(3);
SDValue Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
+ unsigned Opc;
+ std::tie(Opc, Base, Offset) =
+ findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, Base, Offset, Scale);
SDValue Ops[] = {PNg, // Predicate-as-counter
Base, // Memory operand
@@ -1803,7 +1805,7 @@ void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
const EVT ResTys[] = {MVT::Untyped, MVT::Other};
- SDNode *Load = CurDAG->getMachineNode(Opc_ri, DL, ResTys, Ops);
+ SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
SDValue SuperReg = SDValue(Load, 0);
for (unsigned i = 0; i < NumVecs; ++i)
ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
index 827a3c28e1c983..3af91fcd95546d 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
@@ -20,6 +20,23 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x2_i8(target("aarch64.svc
ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x2_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i8_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1b { z0.b, z1.b }, pn8/z, [x0, x1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i8, ptr %ptr, i64 %index
+ %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x2_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_i16:
; CHECK: // %bb.0:
@@ -36,6 +53,23 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x2_i16(target("aarch64.sv
ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x2_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x2_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_i32:
; CHECK: // %bb.0:
@@ -52,6 +86,23 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x2_i32(target("aarch64.sv
ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x2_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i32, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x2_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_i64:
; CHECK: // %bb.0:
@@ -68,6 +119,23 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x2_i64(target("aarch64.sv
ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x2_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i64, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
define { <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x2_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_f16:
; CHECK: // %bb.0:
@@ -84,6 +152,23 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x2_f16(target("aarch64.
ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
}
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x2_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr half, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x2_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_bf16:
; CHECK: // %bb.0:
@@ -100,6 +185,23 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x2_bf16(target("aar
ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x2_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_bf16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr bfloat, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
define { <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x2_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_f32:
; CHECK: // %bb.0:
@@ -116,6 +218,23 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x2_f32(target("aarch6
ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
}
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x2_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr float, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x2.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
define { <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x2_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x2_f64:
; CHECK: // %bb.0:
@@ -132,6 +251,23 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x2_f64(target("aarc
ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x2_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr double, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
; Test to ensure we load into the correct registers for the instruction
define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 16 x i8> %val) {
; CHECK-LABEL: ld1_x2_i8_z0_taken:
@@ -154,6 +290,29 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr
ret <vscale x 16 x i8> %res
}
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 16 x i8> %val, i64 %index) {
+; CHECK-LABEL: ld1_x2_i8_z0_taken_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: add z0.b, z0.b, z2.b
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i8, ptr %ptr, i64 %index
+ %ld1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+ %ld1_0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld1, 0
+ %res = add <vscale x 16 x i8> %val, %ld1_0
+ ret <vscale x 16 x i8> %res
+}
+
define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x4_i8(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_i8:
; CHECK: // %bb.0:
@@ -170,6 +329,23 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x4_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i8_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1b { z0.b - z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i8, ptr %ptr, i64 %index
+ %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x4_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_i16:
; CHECK: // %bb.0:
@@ -186,6 +362,23 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x4_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x4_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_i32:
; CHECK: // %bb.0:
@@ -202,6 +395,23 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x4_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i32, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x4_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_i64:
; CHECK: // %bb.0:
@@ -218,6 +428,23 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x4_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i64, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x4_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_f16:
; CHECK: // %bb.0:
@@ -234,6 +461,23 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale
ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
}
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x4_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr half, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x4_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_bf16:
; CHECK: // %bb.0:
@@ -250,6 +494,23 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <v
ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x4_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_bf16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr bfloat, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x4.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x4_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_f32:
; CHECK: // %bb.0:
@@ -266,6 +527,23 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsca
ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
}
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x4_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr float, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x4_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ld1_x4_f64:
; CHECK: // %bb.0:
@@ -282,6 +560,23 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x4_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr double, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x4.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
; Test to ensure we load into the correct registers for the instruction
define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 8 x i16> %val) {
; CHECK-LABEL: ld1_x4_i16_z0_taken:
@@ -304,6 +599,28 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, pt
ret <vscale x 8 x i16> %res
}
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 8 x i16> %val, i64 %index) {
+; CHECK-LABEL: ld1_x4_i16_z0_taken_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: add z0.h, z0.h, z4.h
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %ld1 = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+ %ld1_0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %ld1, 0
+ %res = add <vscale x 8 x i16> %val, %ld1_0
+ ret <vscale x 8 x i16> %res
+}
; == Non-temporal Multi-Vector Consecutive Loads ==
@@ -323,6 +640,23 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x2_i8(target("aarch64.s
ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x2_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i8_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1b { z0.b, z1.b }, pn8/z, [x0, x1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i8, ptr %ptr, i64 %index
+ %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x2_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_i16:
; CHECK: // %bb.0:
@@ -339,6 +673,23 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x2_i16(target("aarch64.
ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x2_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x2_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_i32:
; CHECK: // %bb.0:
@@ -355,6 +706,23 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x2_i32(target("aarch64.
ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x2_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i32, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x2_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_i64:
; CHECK: // %bb.0:
@@ -371,6 +739,23 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x2_i64(target("aarch64.
ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x2_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i64, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
define { <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x2_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_f16:
; CHECK: // %bb.0:
@@ -387,6 +772,23 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x2_f16(target("aarch6
ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
}
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x2_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x2_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_bf16:
; CHECK: // %bb.0:
@@ -403,6 +805,23 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x2_bf16(target("a
ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x2_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_bf16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr bfloat, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
define { <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x2_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_f32:
; CHECK: // %bb.0:
@@ -419,6 +838,23 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x2_f32(target("aarc
ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
}
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x2_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr float, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
define { <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x2_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x2_f64:
; CHECK: // %bb.0:
@@ -435,6 +871,23 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x2_f64(target("aa
ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x2_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr double, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
; Test to ensure we load into the correct registers for the instruction
define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 4 x i32> %val) {
; CHECK-LABEL: ldnt1_x2_i32_z0_taken:
@@ -457,6 +910,29 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn,
ret <vscale x 4 x i32> %res
}
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 4 x i32> %val, i64 %index) {
+; CHECK-LABEL: ldnt1_x2_i32_z0_taken_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: add z0.s, z0.s, z2.s
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i32, ptr %ptr, i64 %index
+ %ld1 = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+ %ld1_0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %ld1, 0
+ %res = add <vscale x 4 x i32> %val, %ld1_0
+ ret <vscale x 4 x i32> %res
+}
+
define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x4_i8(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_i8:
; CHECK: // %bb.0:
@@ -473,6 +949,23 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x4_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i8_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1b { z0.b - z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i8, ptr %ptr, i64 %index
+ %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x4_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_i16:
; CHECK: // %bb.0:
@@ -489,6 +982,23 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x4_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i16, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x4_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_i32:
; CHECK: // %bb.0:
@@ -505,6 +1015,23 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x4_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i32, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x4_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_i64:
; CHECK: // %bb.0:
@@ -521,6 +1048,23 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x4_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i64, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x4_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_f16:
; CHECK: // %bb.0:
@@ -537,6 +1081,23 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale
ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
}
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x4_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr half, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x4_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_bf16:
; CHECK: // %bb.0:
@@ -553,6 +1114,23 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <v
ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x4_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_bf16_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr bfloat, ptr %ptr, i64 %index
+ %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x4_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_f32:
; CHECK: // %bb.0:
@@ -569,6 +1147,23 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsca
ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
}
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x4_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f32_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr float, ptr %ptr, i64 %index
+ %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x4_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: ldnt1_x4_f64:
; CHECK: // %bb.0:
@@ -585,6 +1180,23 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x4_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f64_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr double, ptr %ptr, i64 %index
+ %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+ ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
; Test to ensure we load into the correct registers for the instruction
define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 2 x i64> %val) {
; CHECK-LABEL: ldnt1_x4_i64_z0_taken:
@@ -607,6 +1219,29 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn,
ret <vscale x 2 x i64> %res
}
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 2 x i64> %val, i64 %index) {
+; CHECK-LABEL: ldnt1_x4_i64_z0_taken_scalar:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: mov p8.b, p0.b
+; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: add z0.d, z0.d, z4.d
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %base = getelementptr i64, ptr %ptr, i64 %index
+ %ld1 = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+ %ld1_0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %ld1, 0
+ %res = add <vscale x 2 x i64> %val, %ld1_0
+ ret <vscale x 2 x i64> %res
+}
+
declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount"), ptr)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount"), ptr)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount"), ptr)