[llvm] r207838 - AArch64/ARM64: support indexed loads/stores on vector types.
Tim Northover
tnorthover at apple.com
Fri May 2 07:54:15 PDT 2014
Author: tnorthover
Date: Fri May 2 09:54:15 2014
New Revision: 207838
URL: http://llvm.org/viewvc/llvm-project?rev=207838&view=rev
Log:
AArch64/ARM64: support indexed loads/stores on vector types.
While post-indexed LD1/ST1 instructions do exist for vector loads and stores,
this patch instead makes use of the more flexible addressing modes available
on the LDR/STR instructions.
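
As a minimal illustrative sketch of what this enables (the function and global
names here are made up, not taken from the patch; assuming a little-endian
arm64 triple such as the one used in the test added below), a vector load whose
address is also advanced by an immediate should now fold into a single
pre-indexed LDR with writeback instead of a separate add:

    @p = global <4 x i32>* null

    define <4 x i32> @example(<4 x i32>* %base) {
      %next = getelementptr <4 x i32>* %base, i32 1   ; base + 16 bytes
      %v = load <4 x i32>* %next, align 16            ; load from the bumped address
      store <4 x i32>* %next, <4 x i32>** @p          ; keep the bumped address live
      ret <4 x i32> %v
    }

Expected codegen, per the patterns added below, is roughly "ldr q0, [x0, #16]!"
(checked with an llc invocation like the RUN line in the new test).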
Added:
llvm/trunk/test/CodeGen/ARM64/indexed-vector-ldst.ll
Modified:
llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp
llvm/trunk/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
llvm/trunk/lib/Target/ARM64/ARM64ISelLowering.cpp
llvm/trunk/lib/Target/ARM64/ARM64InstrInfo.td
Modified: llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp?rev=207838&r1=207837&r2=207838&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64AsmPrinter.cpp Fri May 2 09:54:15 2014
@@ -427,6 +427,7 @@ static unsigned getRealIndexedOpcode(uns
switch (Opc) {
case ARM64::LDRXpre_isel: return ARM64::LDRXpre;
case ARM64::LDRWpre_isel: return ARM64::LDRWpre;
+ case ARM64::LDRQpre_isel: return ARM64::LDRQpre;
case ARM64::LDRDpre_isel: return ARM64::LDRDpre;
case ARM64::LDRSpre_isel: return ARM64::LDRSpre;
case ARM64::LDRBBpre_isel: return ARM64::LDRBBpre;
@@ -437,6 +438,7 @@ static unsigned getRealIndexedOpcode(uns
case ARM64::LDRSHXpre_isel: return ARM64::LDRSHXpre;
case ARM64::LDRSWpre_isel: return ARM64::LDRSWpre;
+ case ARM64::LDRQpost_isel: return ARM64::LDRQpost;
case ARM64::LDRDpost_isel: return ARM64::LDRDpost;
case ARM64::LDRSpost_isel: return ARM64::LDRSpost;
case ARM64::LDRXpost_isel: return ARM64::LDRXpost;
@@ -453,6 +455,7 @@ static unsigned getRealIndexedOpcode(uns
case ARM64::STRWpre_isel: return ARM64::STRWpre;
case ARM64::STRHHpre_isel: return ARM64::STRHHpre;
case ARM64::STRBBpre_isel: return ARM64::STRBBpre;
+ case ARM64::STRQpre_isel: return ARM64::STRQpre;
case ARM64::STRDpre_isel: return ARM64::STRDpre;
case ARM64::STRSpre_isel: return ARM64::STRSpre;
}
@@ -494,6 +497,7 @@ void ARM64AsmPrinter::EmitInstruction(co
case ARM64::LDRBBpre_isel:
case ARM64::LDRXpre_isel:
case ARM64::LDRWpre_isel:
+ case ARM64::LDRQpre_isel:
case ARM64::LDRDpre_isel:
case ARM64::LDRSpre_isel:
case ARM64::LDRSBWpre_isel:
@@ -501,6 +505,7 @@ void ARM64AsmPrinter::EmitInstruction(co
case ARM64::LDRSHWpre_isel:
case ARM64::LDRSHXpre_isel:
case ARM64::LDRSWpre_isel:
+ case ARM64::LDRQpost_isel:
case ARM64::LDRDpost_isel:
case ARM64::LDRSpost_isel:
case ARM64::LDRXpost_isel:
@@ -525,6 +530,7 @@ void ARM64AsmPrinter::EmitInstruction(co
case ARM64::STRWpre_isel:
case ARM64::STRHHpre_isel:
case ARM64::STRBBpre_isel:
+ case ARM64::STRQpre_isel:
case ARM64::STRDpre_isel:
case ARM64::STRSpre_isel: {
MCInst TmpInst;
Modified: llvm/trunk/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp?rev=207838&r1=207837&r2=207838&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp Fri May 2 09:54:15 2014
@@ -907,8 +907,10 @@ SDNode *ARM64DAGToDAGISel::SelectIndexed
}
} else if (VT == MVT::f32) {
Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
- } else if (VT == MVT::f64) {
+ } else if (VT == MVT::f64 || VT.is64BitVector()) {
Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
+ } else if (VT.is128BitVector()) {
+ Opcode = IsPre ? ARM64::LDRQpre_isel : ARM64::LDRQpost_isel;
} else
return nullptr;
SDValue Chain = LD->getChain();
Modified: llvm/trunk/lib/Target/ARM64/ARM64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64ISelLowering.cpp?rev=207838&r1=207837&r2=207838&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64ISelLowering.cpp Fri May 2 09:54:15 2014
@@ -521,6 +521,14 @@ void ARM64TargetLowering::addTypeForNEON
setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
+
+ if (Subtarget->isLittleEndian()) {
+ for (unsigned im = (unsigned)ISD::PRE_INC;
+ im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
+ setIndexedLoadAction(im, VT.getSimpleVT(), Legal);
+ setIndexedStoreAction(im, VT.getSimpleVT(), Legal);
+ }
+ }
}
void ARM64TargetLowering::addDRTypeForNEON(MVT VT) {
Modified: llvm/trunk/lib/Target/ARM64/ARM64InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/ARM64InstrInfo.td?rev=207838&r1=207837&r2=207838&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/ARM64InstrInfo.td (original)
+++ llvm/trunk/lib/Target/ARM64/ARM64InstrInfo.td Fri May 2 09:54:15 2014
@@ -1503,6 +1503,7 @@ def LDRHHpre : LoadPreIdx<0b01, 0, 0b01,
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
// ISel pseudos and patterns. See expanded comment on LoadPreIdxPseudo.
+def LDRQpre_isel : LoadPreIdxPseudo<FPR128>;
def LDRDpre_isel : LoadPreIdxPseudo<FPR64>;
def LDRSpre_isel : LoadPreIdxPseudo<FPR32>;
def LDRXpre_isel : LoadPreIdxPseudo<GPR64>;
@@ -1542,6 +1543,7 @@ def LDRHHpost : LoadPostIdx<0b01, 0, 0b0
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
// ISel pseudos and patterns. See expanded comment on LoadPostIdxPseudo.
+def LDRQpost_isel : LoadPostIdxPseudo<FPR128>;
def LDRDpost_isel : LoadPostIdxPseudo<FPR64>;
def LDRSpost_isel : LoadPostIdxPseudo<FPR32>;
def LDRXpost_isel : LoadPostIdxPseudo<GPR64>;
@@ -1812,6 +1814,7 @@ def STRBBpre : StorePreIdx<0b00, 0, 0b00
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh">;
// ISel pseudos and patterns. See expanded comment on StorePreIdxPseudo.
+defm STRQpre : StorePreIdxPseudo<FPR128, f128, pre_store>;
defm STRDpre : StorePreIdxPseudo<FPR64, f64, pre_store>;
defm STRSpre : StorePreIdxPseudo<FPR32, f32, pre_store>;
defm STRXpre : StorePreIdxPseudo<GPR64, i64, pre_store>;
@@ -1829,6 +1832,32 @@ def : Pat<(pre_truncsti8 GPR64:$Rt, am_n
(STRBBpre_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
simm9:$off)>;
+def : Pat<(pre_store (v8i8 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4i16 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2i32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2f32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v1i64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v1f64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpre_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+
+def : Pat<(pre_store (v16i8 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v8i16 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4i32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v4f32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2i64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(pre_store (v2f64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpre_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+
//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str">;
@@ -1843,6 +1872,7 @@ def STRBBpost : StorePostIdx<0b00, 0, 0b
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh">;
// ISel pseudos and patterns. See expanded comment on StorePostIdxPseudo.
+defm STRQpost : StorePostIdxPseudo<FPR128, f128, post_store, STRQpost>;
defm STRDpost : StorePostIdxPseudo<FPR64, f64, post_store, STRDpost>;
defm STRSpost : StorePostIdxPseudo<FPR32, f32, post_store, STRSpost>;
defm STRXpost : StorePostIdxPseudo<GPR64, i64, post_store, STRXpost>;
@@ -1860,6 +1890,31 @@ def : Pat<(post_truncsti8 GPR64:$Rt, am_
(STRBBpost_isel (EXTRACT_SUBREG GPR64:$Rt, sub_32), am_noindex:$addr,
simm9:$off)>;
+def : Pat<(post_store (v8i8 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v4i16 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v2i32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v2f32 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v1i64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v1f64 FPR64:$Rt), am_noindex:$addr, simm9:$off),
+ (STRDpost_isel FPR64:$Rt, am_noindex:$addr, simm9:$off)>;
+
+def : Pat<(post_store (v16i8 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v8i16 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v4i32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v4f32 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v2i64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
+def : Pat<(post_store (v2f64 FPR128:$Rt), am_noindex:$addr, simm9:$off),
+ (STRQpost_isel FPR128:$Rt, am_noindex:$addr, simm9:$off)>;
//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
Added: llvm/trunk/test/CodeGen/ARM64/indexed-vector-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/indexed-vector-ldst.ll?rev=207838&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/indexed-vector-ldst.ll (added)
+++ llvm/trunk/test/CodeGen/ARM64/indexed-vector-ldst.ll Fri May 2 09:54:15 2014
@@ -0,0 +1,402 @@
+; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
+
+ at ptr = global i8* null
+
+define <8 x i8> @test_v8i8_pre_load(<8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %val = load <8 x i8>* %newaddr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret <8 x i8> %val
+}
+
+define <8 x i8> @test_v8i8_post_load(<8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %val = load <8 x i8>* %addr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret <8 x i8> %val
+}
+
+define void @test_v8i8_pre_store(<8 x i8> %in, <8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ store <8 x i8> %in, <8 x i8>* %newaddr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret void
+}
+
+define void @test_v8i8_post_store(<8 x i8> %in, <8 x i8>* %addr) {
+; CHECK-LABEL: test_v8i8_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ store <8 x i8> %in, <8 x i8>* %addr, align 8
+ store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+ ret void
+}
+
+define <4 x i16> @test_v4i16_pre_load(<4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %val = load <4 x i16>* %newaddr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret <4 x i16> %val
+}
+
+define <4 x i16> @test_v4i16_post_load(<4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %val = load <4 x i16>* %addr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret <4 x i16> %val
+}
+
+define void @test_v4i16_pre_store(<4 x i16> %in, <4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ store <4 x i16> %in, <4 x i16>* %newaddr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret void
+}
+
+define void @test_v4i16_post_store(<4 x i16> %in, <4 x i16>* %addr) {
+; CHECK-LABEL: test_v4i16_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ store <4 x i16> %in, <4 x i16>* %addr, align 8
+ store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+ ret void
+}
+
+define <2 x i32> @test_v2i32_pre_load(<2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %val = load <2 x i32>* %newaddr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret <2 x i32> %val
+}
+
+define <2 x i32> @test_v2i32_post_load(<2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %val = load <2 x i32>* %addr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret <2 x i32> %val
+}
+
+define void @test_v2i32_pre_store(<2 x i32> %in, <2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ store <2 x i32> %in, <2 x i32>* %newaddr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret void
+}
+
+define void @test_v2i32_post_store(<2 x i32> %in, <2 x i32>* %addr) {
+; CHECK-LABEL: test_v2i32_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ store <2 x i32> %in, <2 x i32>* %addr, align 8
+ store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+ ret void
+}
+
+define <2 x float> @test_v2f32_pre_load(<2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %val = load <2 x float>* %newaddr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret <2 x float> %val
+}
+
+define <2 x float> @test_v2f32_post_load(<2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %val = load <2 x float>* %addr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret <2 x float> %val
+}
+
+define void @test_v2f32_pre_store(<2 x float> %in, <2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ store <2 x float> %in, <2 x float>* %newaddr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret void
+}
+
+define void @test_v2f32_post_store(<2 x float> %in, <2 x float>* %addr) {
+; CHECK-LABEL: test_v2f32_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <2 x float>* %addr, i32 5
+ store <2 x float> %in, <2 x float>* %addr, align 8
+ store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+ ret void
+}
+
+define <1 x i64> @test_v1i64_pre_load(<1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_pre_load:
+; CHECK: ldr d0, [x0, #40]!
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %val = load <1 x i64>* %newaddr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret <1 x i64> %val
+}
+
+define <1 x i64> @test_v1i64_post_load(<1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_post_load:
+; CHECK: ldr d0, [x0], #40
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %val = load <1 x i64>* %addr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret <1 x i64> %val
+}
+
+define void @test_v1i64_pre_store(<1 x i64> %in, <1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_pre_store:
+; CHECK: str d0, [x0, #40]!
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ store <1 x i64> %in, <1 x i64>* %newaddr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret void
+}
+
+define void @test_v1i64_post_store(<1 x i64> %in, <1 x i64>* %addr) {
+; CHECK-LABEL: test_v1i64_post_store:
+; CHECK: str d0, [x0], #40
+ %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ store <1 x i64> %in, <1 x i64>* %addr, align 8
+ store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+ ret void
+}
+
+define <16 x i8> @test_v16i8_pre_load(<16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %val = load <16 x i8>* %newaddr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret <16 x i8> %val
+}
+
+define <16 x i8> @test_v16i8_post_load(<16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %val = load <16 x i8>* %addr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret <16 x i8> %val
+}
+
+define void @test_v16i8_pre_store(<16 x i8> %in, <16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ store <16 x i8> %in, <16 x i8>* %newaddr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret void
+}
+
+define void @test_v16i8_post_store(<16 x i8> %in, <16 x i8>* %addr) {
+; CHECK-LABEL: test_v16i8_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ store <16 x i8> %in, <16 x i8>* %addr, align 8
+ store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+ ret void
+}
+
+define <8 x i16> @test_v8i16_pre_load(<8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %val = load <8 x i16>* %newaddr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret <8 x i16> %val
+}
+
+define <8 x i16> @test_v8i16_post_load(<8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %val = load <8 x i16>* %addr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret <8 x i16> %val
+}
+
+define void @test_v8i16_pre_store(<8 x i16> %in, <8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ store <8 x i16> %in, <8 x i16>* %newaddr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret void
+}
+
+define void @test_v8i16_post_store(<8 x i16> %in, <8 x i16>* %addr) {
+; CHECK-LABEL: test_v8i16_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ store <8 x i16> %in, <8 x i16>* %addr, align 8
+ store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+ ret void
+}
+
+define <4 x i32> @test_v4i32_pre_load(<4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %val = load <4 x i32>* %newaddr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret <4 x i32> %val
+}
+
+define <4 x i32> @test_v4i32_post_load(<4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %val = load <4 x i32>* %addr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret <4 x i32> %val
+}
+
+define void @test_v4i32_pre_store(<4 x i32> %in, <4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ store <4 x i32> %in, <4 x i32>* %newaddr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret void
+}
+
+define void @test_v4i32_post_store(<4 x i32> %in, <4 x i32>* %addr) {
+; CHECK-LABEL: test_v4i32_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ store <4 x i32> %in, <4 x i32>* %addr, align 8
+ store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+ ret void
+}
+
+
+define <4 x float> @test_v4f32_pre_load(<4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %val = load <4 x float>* %newaddr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret <4 x float> %val
+}
+
+define <4 x float> @test_v4f32_post_load(<4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %val = load <4 x float>* %addr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret <4 x float> %val
+}
+
+define void @test_v4f32_pre_store(<4 x float> %in, <4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ store <4 x float> %in, <4 x float>* %newaddr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret void
+}
+
+define void @test_v4f32_post_store(<4 x float> %in, <4 x float>* %addr) {
+; CHECK-LABEL: test_v4f32_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <4 x float>* %addr, i32 5
+ store <4 x float> %in, <4 x float>* %addr, align 8
+ store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+ ret void
+}
+
+
+define <2 x i64> @test_v2i64_pre_load(<2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %val = load <2 x i64>* %newaddr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret <2 x i64> %val
+}
+
+define <2 x i64> @test_v2i64_post_load(<2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %val = load <2 x i64>* %addr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret <2 x i64> %val
+}
+
+define void @test_v2i64_pre_store(<2 x i64> %in, <2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ store <2 x i64> %in, <2 x i64>* %newaddr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret void
+}
+
+define void @test_v2i64_post_store(<2 x i64> %in, <2 x i64>* %addr) {
+; CHECK-LABEL: test_v2i64_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ store <2 x i64> %in, <2 x i64>* %addr, align 8
+ store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+ ret void
+}
+
+
+define <2 x double> @test_v2f64_pre_load(<2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_pre_load:
+; CHECK: ldr q0, [x0, #80]!
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %val = load <2 x double>* %newaddr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret <2 x double> %val
+}
+
+define <2 x double> @test_v2f64_post_load(<2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_post_load:
+; CHECK: ldr q0, [x0], #80
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %val = load <2 x double>* %addr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret <2 x double> %val
+}
+
+define void @test_v2f64_pre_store(<2 x double> %in, <2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_pre_store:
+; CHECK: str q0, [x0, #80]!
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ store <2 x double> %in, <2 x double>* %newaddr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret void
+}
+
+define void @test_v2f64_post_store(<2 x double> %in, <2 x double>* %addr) {
+; CHECK-LABEL: test_v2f64_post_store:
+; CHECK: str q0, [x0], #80
+ %newaddr = getelementptr <2 x double>* %addr, i32 5
+ store <2 x double> %in, <2 x double>* %addr, align 8
+ store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+ ret void
+}