[llvm] r329611 - AArch64: Allow offsets to be folded into addresses with ELF.
Peter Collingbourne via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 9 12:59:57 PDT 2018
Author: pcc
Date: Mon Apr 9 12:59:57 2018
New Revision: 329611
URL: http://llvm.org/viewvc/llvm-project?rev=329611&view=rev
Log:
AArch64: Allow offsets to be folded into addresses with ELF.
This is a code size win in code that takes offset addresses
frequently, such as C++ constructors that typically need to compute
an offset address of a vtable. It reduces the size of Chromium for
Android's .text section by 46KB, or 56KB with ThinLTO (which exposes
more opportunities to use a direct access rather than a GOT access).
Because the addend range is limited in COFF and Mach-O, this is
enabled for ELF only.
Differential Revision: https://reviews.llvm.org/D45199
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll
llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll
llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll
llvm/trunk/test/CodeGen/AArch64/cmpxchg-O0.ll
llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll
llvm/trunk/test/CodeGen/AArch64/func-calls.ll
llvm/trunk/test/CodeGen/AArch64/misched-fusion-addr.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Mon Apr 9 12:59:57 2018
@@ -743,14 +743,16 @@ bool AArch64DAGToDAGISel::SelectAddrMode
if (!GAN)
return true;
- const GlobalValue *GV = GAN->getGlobal();
- unsigned Alignment = GV->getAlignment();
- Type *Ty = GV->getValueType();
- if (Alignment == 0 && Ty->isSized())
- Alignment = DL.getABITypeAlignment(Ty);
+ if (GAN->getOffset() % Size == 0) {
+ const GlobalValue *GV = GAN->getGlobal();
+ unsigned Alignment = GV->getAlignment();
+ Type *Ty = GV->getValueType();
+ if (Alignment == 0 && Ty->isSized())
+ Alignment = DL.getABITypeAlignment(Ty);
- if (Alignment >= Size)
- return true;
+ if (Alignment >= Size)
+ return true;
+ }
}
if (CurDAG->isBaseWithConstantOffset(N)) {
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Apr 9 12:59:57 2018
@@ -3675,7 +3675,8 @@ AArch64TargetLowering::LowerReturn(SDVal
SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const {
- return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
+ return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
+ N->getOffset(), Flag);
}
SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
@@ -3749,9 +3750,9 @@ SDValue AArch64TargetLowering::LowerGlob
: AArch64II::MO_NO_FLAG);
unsigned char OpFlags =
Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
-
- assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
- "unexpected offset in global node");
+ if (OpFlags != AArch64II::MO_NO_FLAG)
+ assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
+ "unexpected offset in global node");
// This also catches the large code model case for Darwin.
if ((OpFlags & AArch64II::MO_GOT) != 0) {
@@ -4989,11 +4990,15 @@ SDValue AArch64TargetLowering::LowerShif
bool AArch64TargetLowering::isOffsetFoldingLegal(
const GlobalAddressSDNode *GA) const {
- DEBUG(dbgs() << "Skipping offset folding global address: ");
- DEBUG(GA->dump());
- DEBUG(dbgs() << "AArch64 doesn't support folding offsets into global "
- "addresses\n");
- return false;
+ // FIXME: Only ELF can represent the full range of possible addends here, as
+ // the format stores the addend in a 64-bit field. With Mach-O the equivalent
+ // field is 24 bits, and with COFF it is 21 bits. To make this work with the
+ // other object formats we will need to arrange to prevent the addend from
+ // going out of bounds.
+ if (!getTargetMachine().getTargetTriple().isOSBinFormatELF())
+ return false;
+ return Subtarget->ClassifyGlobalReference(
+ GA->getGlobal(), getTargetMachine()) == AArch64II::MO_NO_FLAG;
}
bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll Mon Apr 9 12:59:57 2018
@@ -6,12 +6,13 @@
; The important thing for this test is that we need an unaligned load of `l_b'
; ("ldr w2, [x1, #8]" in this case).
-; CHECK: adrp x[[PAGE:[0-9]+]], {{l_b at PAGE|.Lb}}
-; CHECK: add x[[ADDR:[0-9]+]], x[[PAGE]], {{l_b at PAGEOFF|:lo12:.Lb}}
-; CHECK-NEXT: ldr [[VAL:w[0-9]+]], [x[[ADDR]], #8]
-; CHECK-NEXT: str [[VAL]], [x0, #8]
-; CHECK-NEXT: ldr [[VAL2:x[0-9]+]], [x[[ADDR]]]
-; CHECK-NEXT: str [[VAL2]], [x0]
+; CHECK: adrp x[[PAGE:[0-9]+]], .Lb+8
+; CHECK-NEXT: ldr [[VAL:w[0-9]+]], [x[[PAGE]], :lo12:.Lb+8]
+; CHECK-NEXT: str [[VAL]], [x0, #8]
+; CHECK-NEXT: adrp x[[ADDR:[0-9]+]], .Lb
+; CHECK-NEXT: add x[[ADDR]], x[[ADDR]], :lo12:.Lb
+; CHECK-NEXT: ldr [[VAL2:x[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: str [[VAL2]], [x0]
define void @foo(i8* %a) {
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast ([3 x i32]* @b to i8*), i64 12, i1 false)
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll Mon Apr 9 12:59:57 2018
@@ -5,32 +5,31 @@
; base + offset (imm9)
; CHECK: @t1
-; CHECK: ldr xzr, [x{{[0-9]+}}, #8]
+; CHECK: ldr xzr, [x0, #8]
; CHECK: ret
-define void @t1() {
- %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1
+define void @t1(i64* %object) {
+ %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 1
%tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
; base + offset (> imm9)
; CHECK: @t2
-; CHECK: sub [[ADDREG:x[0-9]+]], x{{[0-9]+}}, #264
+; CHECK: sub [[ADDREG:x[0-9]+]], x0, #264
; CHECK: ldr xzr, [
-; CHECK: [[ADDREG]]]
; CHECK: ret
-define void @t2() {
- %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33
+define void @t2(i64* %object) {
+ %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 -33
%tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
; CHECK: @t3
-; CHECK: ldr xzr, [x{{[0-9]+}}, #32760]
+; CHECK: ldr xzr, [x0, #32760]
; CHECK: ret
-define void @t3() {
- %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095
+define void @t3(i64* %object) {
+ %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4095
%tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -38,10 +37,10 @@ define void @t3() {
; base + unsigned offset (> imm12 * size of type in bytes)
; CHECK: @t4
; CHECK: orr w[[NUM:[0-9]+]], wzr, #0x8000
-; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
+; CHECK: ldr xzr, [x0, x[[NUM]]]
; CHECK: ret
-define void @t4() {
- %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096
+define void @t4(i64* %object) {
+ %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4096
%tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -58,12 +57,12 @@ define void @t5(i64 %a) {
; base + reg + imm
; CHECK: @t6
-; CHECK: add [[ADDREG:x[0-9]+]], x{{[0-9]+}}, x{{[0-9]+}}, lsl #3
+; CHECK: add [[ADDREG:x[0-9]+]], x1, x0, lsl #3
; CHECK-NEXT: orr w[[NUM:[0-9]+]], wzr, #0x8000
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
-define void @t6(i64 %a) {
- %tmp1 = getelementptr inbounds i64, i64* @object, i64 %a
+define void @t6(i64 %a, i64* %object) {
+ %tmp1 = getelementptr inbounds i64, i64* %object, i64 %a
%incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
%tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll Mon Apr 9 12:59:57 2018
@@ -29,7 +29,8 @@ define void @fetch_and_nand(i128* %p, i1
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw nand i128* %p, i128 %bits release
store i128 %val, i128* @var, align 16
ret void
@@ -44,7 +45,8 @@ define void @fetch_and_or(i128* %p, i128
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw or i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -59,7 +61,8 @@ define void @fetch_and_add(i128* %p, i12
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw add i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -74,7 +77,8 @@ define void @fetch_and_sub(i128* %p, i12
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw sub i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -95,7 +99,8 @@ define void @fetch_and_min(i128* %p, i12
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw min i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -116,7 +121,8 @@ define void @fetch_and_max(i128* %p, i12
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw max i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -137,7 +143,8 @@ define void @fetch_and_umin(i128* %p, i1
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw umin i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
@@ -158,7 +165,8 @@ define void @fetch_and_umax(i128* %p, i1
; CHECK: stlxp [[SCRATCH_RES:w[0-9]+]], [[SCRATCH_REGLO]], [[SCRATCH_REGHI]], [x0]
; CHECK: cbnz [[SCRATCH_RES]], [[LABEL]]
-; CHECK-DAG: stp [[DEST_REGLO]], [[DEST_REGHI]]
+; CHECK-DAG: str [[DEST_REGLO]], [{{.*}}, :lo12:var]
+; CHECK-DAG: str [[DEST_REGHI]], [{{.*}}, :lo12:var+8]
%val = atomicrmw umax i128* %p, i128 %bits seq_cst
store i128 %val, i128* @var, align 16
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll Mon Apr 9 12:59:57 2018
@@ -16,10 +16,12 @@
define i32 @t0() {
entry:
; CHECK-LABEL: t0:
-; CHECK: ldrb [[REG0:w[0-9]+]], [x[[BASEREG:[0-9]+]], #10]
-; CHECK: strb [[REG0]], [x[[BASEREG2:[0-9]+]], #10]
-; CHECK: ldrh [[REG1:w[0-9]+]], [x[[BASEREG]], #8]
-; CHECK: strh [[REG1]], [x[[BASEREG2]], #8]
+; CHECK: ldrb [[REG0:w[0-9]+]], [{{x[0-9]+}}, :lo12:src+10]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:src+8
+; CHECK: strb [[REG0]], [{{x[0-9]+}}, :lo12:dst+10]
+; CHECK: ldrh [[REG1:w[0-9]+]], [x[[BASEREG]]]
+; CHECK: add x[[BASEREG2:[0-9]+]], {{x[0-9]+}}, :lo12:dst+8
+; CHECK: strh [[REG1]], [x[[BASEREG2]]]
; CHECK: ldr [[REG2:x[0-9]+]],
; CHECK: str [[REG2]],
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
@@ -29,7 +31,8 @@ entry:
define void @t1(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t1:
-; CHECK: ldur [[DEST:q[0-9]+]], [x[[BASEREG:[0-9]+]], #15]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str1+15
+; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG:[0-9]+]]]
; CHECK: stur [[DEST]], [x0, #15]
; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
; CHECK: str [[DEST]], [x0]
@@ -43,7 +46,10 @@ entry:
; CHECK: mov [[REG3:w[0-9]+]]
; CHECK: movk [[REG3]],
; CHECK: str [[REG3]], [x0, #32]
-; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str2+16
+; CHECK: ldr [[DEST2:q[0-9]+]], [x[[BASEREG]]]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str2
+; CHECK: ldr [[DEST1:q[0-9]+]], [x[[BASEREG]]]
; CHECK: stp [[DEST1]], [[DEST2]], [x0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false)
ret void
@@ -52,8 +58,10 @@ entry:
define void @t3(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t3:
-; CHECK: ldr [[REG4:x[0-9]+]], [x[[BASEREG:[0-9]+]], #16]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str3+16
+; CHECK: ldr [[REG4:x[0-9]+]], [x[[BASEREG]]]
; CHECK: str [[REG4]], [x0, #16]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str3
; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
; CHECK: str [[DEST]], [x0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false)
@@ -87,8 +95,10 @@ entry:
define void @t6() nounwind {
entry:
; CHECK-LABEL: t6:
-; CHECK: ldur [[REG9:x[0-9]+]], [x{{[0-9]+}}, #6]
-; CHECK: stur [[REG9]], [x{{[0-9]+}}, #6]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:.L.str6+6
+; CHECK: ldr [[REG9:x[0-9]+]], [x[[BASEREG]]]
+; CHECK: add x[[BASEREG:[0-9]+]], {{x[0-9]+}}, :lo12:spool.splbuf+6
+; CHECK: str [[REG9]], [x[[BASEREG]]]
; CHECK: ldr
; CHECK: str
call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false)
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll Mon Apr 9 12:59:57 2018
@@ -8,11 +8,15 @@
; Check that no scheduling dependencies are created between the paired loads and the store during post-RA MI scheduling.
;
; CHECK-LABEL: # Machine code for function foo:
-; CHECK: SU(2): renamable $w{{[0-9]+}}, renamable $w{{[0-9]+}} = LDPWi
+; CHECK: SU(1): renamable $w{{[0-9]+}} = LDRWui
; CHECK: Successors:
-; CHECK-NOT: ch SU(4)
-; CHECK: SU(3)
-; CHECK: SU(4): STRWui $wzr, renamable $x{{[0-9]+}}
+; CHECK-NOT: SU(5)
+; CHECK: SU(2)
+; CHECK: SU(3): renamable $w{{[0-9]+}} = LDRWui
+; CHECK: Successors:
+; CHECK-NOT: SU(5)
+; CHECK: SU(4)
+; CHECK: SU(5): STRWui $wzr, renamable $x{{[0-9]+}}
define i32 @foo() {
entry:
%0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-variadic-aapcs.ll Mon Apr 9 12:59:57 2018
@@ -11,9 +11,6 @@ define void @test_simple(i32 %n, ...) {
; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
-
; CHECK: stp x1, x2, [sp, #[[GR_BASE:[0-9]+]]]
; ... omit middle ones ...
; CHECK: str x7, [sp, #
@@ -22,19 +19,20 @@ define void @test_simple(i32 %n, ...) {
; ... omit middle ones ...
; CHECK: stp q6, q7, [sp, #
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
+; CHECK: str [[STACK_TOP]], [{{x[0-9]+}}, :lo12:var]
; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #56
-; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK: str [[GR_TOP]], [{{x[0-9]+}}, :lo12:var+8]
; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #128
-; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+; CHECK: str [[VR_TOP]], [{{x[0-9]+}}, :lo12:var+16]
-; CHECK: mov [[GRVR:x[0-9]+]], #-545460846720
-; CHECK: movk [[GRVR]], #65480
-; CHECK: str [[GRVR]], [x[[VA_LIST]], #24]
+; CHECK: mov [[GRVR1:w[0-9]+]], #-56
+; CHECK: str [[GRVR1]], [{{x[0-9]+}}, :lo12:var+24]
+; CHECK: orr [[GRVR2:w[0-9]+]], wzr, #0xffffff80
+; CHECK: str [[GRVR2]], [{{x[0-9]+}}, :lo12:var+28]
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
@@ -47,9 +45,6 @@ define void @test_fewargs(i32 %n, i32 %n
; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
-; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
-
; CHECK: stp x3, x4, [sp, #[[GR_BASE:[0-9]+]]]
; ... omit middle ones ...
; CHECK: str x7, [sp, #
@@ -58,19 +53,20 @@ define void @test_fewargs(i32 %n, i32 %n
; ... omit middle ones ...
; CHECK: str q7, [sp, #
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
+; CHECK: str [[STACK_TOP]], [{{x[0-9]+}}, :lo12:var]
; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #40
-; CHECK: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK: str [[GR_TOP]], [{{x[0-9]+}}, :lo12:var+8]
; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #112
-; CHECK: str [[VR_TOP]], [x[[VA_LIST]], #16]
+; CHECK: str [[VR_TOP]], [{{x[0-9]+}}, :lo12:var+16]
-; CHECK: mov [[GRVR_OFFS:x[0-9]+]], #-40
-; CHECK: movk [[GRVR_OFFS]], #65424, lsl #32
-; CHECK: str [[GRVR_OFFS]], [x[[VA_LIST]], #24]
+; CHECK: mov [[GRVR1:w[0-9]+]], #-40
+; CHECK: str [[GRVR1]], [{{x[0-9]+}}, :lo12:var+24]
+; CHECK: mov [[GRVR2:w[0-9]+]], #-112
+; CHECK: str [[GRVR2]], [{{x[0-9]+}}, :lo12:var+28]
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
@@ -85,8 +81,7 @@ define void @test_nospare([8 x i64], [8
call void @llvm.va_start(i8* %addr)
; CHECK-NOT: sub sp, sp
; CHECK: mov [[STACK:x[0-9]+]], sp
-; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
-; CHECK: str [[STACK]], [x[[VAR]]]
+; CHECK: str [[STACK]], [{{x[0-9]+}}, :lo12:var]
ret void
}
@@ -97,8 +92,7 @@ define void @test_offsetstack([8 x i64],
; CHECK-LABEL: test_offsetstack:
; CHECK: stp {{q[0-9]+}}, {{q[0-9]+}}, [sp, #-80]!
; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #96
-; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
-; CHECK: str [[STACK_TOP]], [x[[VAR]]]
+; CHECK: str [[STACK_TOP]], [{{x[0-9]+}}, :lo12:var]
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
@@ -129,13 +123,13 @@ define void @test_va_copy() {
call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var
-
; CHECK: ldr [[BLOCK:q[0-9]+]], [x[[SRC]]]
; CHECK: add x[[DST:[0-9]+]], {{x[0-9]+}}, :lo12:second_list
; CHECK: str [[BLOCK]], [x[[DST]]]
-; CHECK: ldr [[BLOCK:q[0-9]+]], [x[[SRC]], #16]
-; CHECK: str [[BLOCK]], [x[[DST]], #16]
+; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var+16
+; CHECK: ldr [[BLOCK:q[0-9]+]], [x[[SRC]]]
+; CHECK: str [[BLOCK]], [x[[DST]]]
ret void
; CHECK: ret
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll Mon Apr 9 12:59:57 2018
@@ -264,149 +264,196 @@ entry:
; Add a bunch of tests for rdar://13258794: Match LDUR/STUR for D and Q
; registers for unscaled vector accesses
- at str = global [63 x i8] c"Test case for rdar://13258794: LDUR/STUR for D and Q registers\00", align 1
-define <1 x i64> @fct0() nounwind readonly ssp {
+define <1 x i64> @fct0(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct0:
; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <1 x i64>, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <1 x i64>*
+ %0 = load <1 x i64>, <1 x i64>* %q, align 8
ret <1 x i64> %0
}
-define <2 x i32> @fct1() nounwind readonly ssp {
+define <2 x i32> @fct1(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct1:
; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <2 x i32>, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <2 x i32>*
+ %0 = load <2 x i32>, <2 x i32>* %q, align 8
ret <2 x i32> %0
}
-define <4 x i16> @fct2() nounwind readonly ssp {
+define <4 x i16> @fct2(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct2:
; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <4 x i16>, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <4 x i16>*
+ %0 = load <4 x i16>, <4 x i16>* %q, align 8
ret <4 x i16> %0
}
-define <8 x i8> @fct3() nounwind readonly ssp {
+define <8 x i8> @fct3(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct3:
; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <8 x i8>, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <8 x i8>*
+ %0 = load <8 x i8>, <8 x i8>* %q, align 8
ret <8 x i8> %0
}
-define <2 x i64> @fct4() nounwind readonly ssp {
+define <2 x i64> @fct4(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct4:
; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <2 x i64>, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <2 x i64>*
+ %0 = load <2 x i64>, <2 x i64>* %q, align 16
ret <2 x i64> %0
}
-define <4 x i32> @fct5() nounwind readonly ssp {
+define <4 x i32> @fct5(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct5:
; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <4 x i32>, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <4 x i32>*
+ %0 = load <4 x i32>, <4 x i32>* %q, align 16
ret <4 x i32> %0
}
-define <8 x i16> @fct6() nounwind readonly ssp {
+define <8 x i16> @fct6(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct6:
; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <8 x i16>, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <8 x i16>*
+ %0 = load <8 x i16>, <8 x i16>* %q, align 16
ret <8 x i16> %0
}
-define <16 x i8> @fct7() nounwind readonly ssp {
+define <16 x i8> @fct7(i8* %str) nounwind readonly ssp {
entry:
; CHECK-LABEL: fct7:
; CHECK: ldur {{q[0-9]+}}, [{{x[0-9]+}}, #3]
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <16 x i8>*
+ %0 = load <16 x i8>, <16 x i8>* %q, align 16
ret <16 x i8> %0
}
-define void @fct8() nounwind ssp {
+define void @fct8(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct8:
; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <1 x i64>, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
- store <1 x i64> %0, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <1 x i64>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <1 x i64>*
+ %0 = load <1 x i64>, <1 x i64>* %q, align 8
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <1 x i64>*
+ store <1 x i64> %0, <1 x i64>* %q2, align 8
ret void
}
-define void @fct9() nounwind ssp {
+define void @fct9(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct9:
; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <2 x i32>, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
- store <2 x i32> %0, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <2 x i32>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <2 x i32>*
+ %0 = load <2 x i32>, <2 x i32>* %q, align 8
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <2 x i32>*
+ store <2 x i32> %0, <2 x i32>* %q2, align 8
ret void
}
-define void @fct10() nounwind ssp {
+define void @fct10(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct10:
; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <4 x i16>, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
- store <4 x i16> %0, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <4 x i16>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <4 x i16>*
+ %0 = load <4 x i16>, <4 x i16>* %q, align 8
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <4 x i16>*
+ store <4 x i16> %0, <4 x i16>* %q2, align 8
ret void
}
-define void @fct11() nounwind ssp {
+define void @fct11(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct11:
; CHECK: ldur [[DESTREG:d[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <8 x i8>, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
- store <8 x i8> %0, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <8 x i8>*), align 8
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <8 x i8>*
+ %0 = load <8 x i8>, <8 x i8>* %q, align 8
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <8 x i8>*
+ store <8 x i8> %0, <8 x i8>* %q2, align 8
ret void
}
-define void @fct12() nounwind ssp {
+define void @fct12(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct12:
; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <2 x i64>, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
- store <2 x i64> %0, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <2 x i64>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <2 x i64>*
+ %0 = load <2 x i64>, <2 x i64>* %q, align 16
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <2 x i64>*
+ store <2 x i64> %0, <2 x i64>* %q2, align 16
ret void
}
-define void @fct13() nounwind ssp {
+define void @fct13(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct13:
; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <4 x i32>, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
- store <4 x i32> %0, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <4 x i32>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <4 x i32>*
+ %0 = load <4 x i32>, <4 x i32>* %q, align 16
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <4 x i32>*
+ store <4 x i32> %0, <4 x i32>* %q2, align 16
ret void
}
-define void @fct14() nounwind ssp {
+define void @fct14(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct14:
; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <8 x i16>, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
- store <8 x i16> %0, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <8 x i16>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <8 x i16>*
+ %0 = load <8 x i16>, <8 x i16>* %q, align 16
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <8 x i16>*
+ store <8 x i16> %0, <8 x i16>* %q2, align 16
ret void
}
-define void @fct15() nounwind ssp {
+define void @fct15(i8* %str) nounwind ssp {
entry:
; CHECK-LABEL: fct15:
; CHECK: ldur [[DESTREG:q[0-9]+]], {{\[}}[[BASEREG:x[0-9]+]], #3]
; CHECK: stur [[DESTREG]], {{\[}}[[BASEREG]], #4]
- %0 = load <16 x i8>, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
- store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8], [63 x i8]* @str, i64 0, i64 4) to <16 x i8>*), align 16
+ %p = getelementptr inbounds i8, i8* %str, i64 3
+ %q = bitcast i8* %p to <16 x i8>*
+ %0 = load <16 x i8>, <16 x i8>* %q, align 16
+ %p2 = getelementptr inbounds i8, i8* %str, i64 4
+ %q2 = bitcast i8* %p2 to <16 x i8>*
+ store <16 x i8> %0, <16 x i8>* %q2, align 16
ret void
}
Modified: llvm/trunk/test/CodeGen/AArch64/cmpxchg-O0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/cmpxchg-O0.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/cmpxchg-O0.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/cmpxchg-O0.ll Mon Apr 9 12:59:57 2018
@@ -86,11 +86,10 @@ define { i128, i1 } @test_cmpxchg_128(i1
@var128 = global i128 0
define {i128, i1} @test_cmpxchg_128_unsplit(i128* %addr) {
; CHECK-LABEL: test_cmpxchg_128_unsplit:
-; CHECK: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
-; CHECK: ldr [[DESIRED_HI:x[0-9]+]], [x[[VAR128]], #8]
-; CHECK: ldr [[DESIRED_LO:x[0-9]+]], [x[[VAR128]]]
-; CHECK: ldr [[NEW_HI:x[0-9]+]], [x[[VAR128]], #8]
-; CHECK: ldr [[NEW_LO:x[0-9]+]], [x[[VAR128]]]
+; CHECK: ldr [[DESIRED_HI:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128+8]
+; CHECK: ldr [[DESIRED_LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
+; CHECK: ldr [[NEW_HI:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128+8]
+; CHECK: ldr [[NEW_LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
; CHECK: ldaxp [[OLD_LO:x[0-9]+]], [[OLD_HI:x[0-9]+]], [x0]
; CHECK: cmp [[OLD_LO]], [[DESIRED_LO]]
Modified: llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll Mon Apr 9 12:59:57 2018
@@ -96,8 +96,8 @@ define [2 x i64] @return_struct() {
%addr = bitcast %myStruct* @varstruct to [2 x i64]*
%val = load [2 x i64], [2 x i64]* %addr
ret [2 x i64] %val
-; CHECK: add x[[VARSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varstruct
-; CHECK: ldp x0, x1, [x[[VARSTRUCT]]]
+; CHECK: ldr x0, [{{x[0-9]+}}, :lo12:varstruct]
+; CHECK: ldr x1, [{{x[0-9]+}}, :lo12:varstruct+8]
; Make sure epilogue immediately follows
; CHECK-NEXT: ret
}
@@ -164,8 +164,8 @@ define void @stacked_fpu(float %var0, do
define i64 @check_i128_regalign(i32 %val0, i128 %val1, i64 %val2) {
; CHECK-LABEL: check_i128_regalign
store i128 %val1, i128* @var128
-; CHECK-DAG: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
-; CHECK-DAG: stp x2, x3, [x[[VAR128]]]
+; CHECK-DAG: str x3, [{{x[0-9]+}}, :lo12:var128+8]
+; CHECK-DAG: str x2, [{{x[0-9]+}}, :lo12:var128]
ret i64 %val2
; CHECK-DAG: mov x0, x4
Modified: llvm/trunk/test/CodeGen/AArch64/func-calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/func-calls.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-calls.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/func-calls.ll Mon Apr 9 12:59:57 2018
@@ -62,11 +62,11 @@ define void @simple_rets() {
%arr = call [2 x i64] @return_smallstruct()
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
; CHECK: bl return_smallstruct
-; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
-; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]
+; CHECK: str x1, [{{x[0-9]+}}, {{#?}}:lo12:varsmallstruct+8]
+; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
+; CHECK: str x0, [{{x[0-9]+}}, {{#?}}:lo12:varsmallstruct]
call void @return_large_struct(%myStruct* sret @varstruct)
-; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
; CHECK: bl return_large_struct
ret void
@@ -128,12 +128,12 @@ define void @check_i128_align() {
call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
i32 4, i32 5, i32 6, i32 7,
i32 42, i128 %val)
-; CHECK: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
-; CHECK: ldp [[I128LO:x[0-9]+]], [[I128HI:x[0-9]+]], [x[[VAR128]]]
+; CHECK: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
+; CHECK: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128+8]
; CHECK: stp [[I128HI]], {{x[0-9]+}}, [sp, #24]
-; CHECK-NONEON: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
-; CHECK-NONEON: ldp [[I128LO:x[0-9]+]], [[I128HI:x[0-9]+]], [x[[VAR128]]]
+; CHECK-NONEON: ldr [[I128LO:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128]
+; CHECK-NONEON: ldr [[I128HI:x[0-9]+]], [{{x[0-9]+}}, :lo12:var128+8]
; CHECK-NONEON: stp [[I128HI]], {{x[0-9]+}}, [sp, #24]
; CHECK: bl check_i128_stackalign
Modified: llvm/trunk/test/CodeGen/AArch64/misched-fusion-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/misched-fusion-addr.ll?rev=329611&r1=329610&r2=329611&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/misched-fusion-addr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/misched-fusion-addr.ll Mon Apr 9 12:59:57 2018
@@ -67,8 +67,10 @@ define void @ldst_64bit() {
; CHECK-LABEL: ldst_64bit:
; CHECK: adrp [[RL:x[0-9]+]], var_64bit
; CHECK-NEXT: ldr {{x[0-9]+}}, {{\[}}[[RL]], {{#?}}:lo12:var_64bit{{\]}}
-; CHECK: adrp [[RQ:x[0-9]+]], var_128bit
-; CHECK-NEXT: add {{x[0-9]+}}, [[RQ]], {{#?}}:lo12:var_128bit
+; CHECK: adrp [[RQ1:x[0-9]+]], var_128bit
+; CHECK-NEXT: str {{x[0-9]+}}, {{\[}}[[RQ1]], {{#?}}:lo12:var_128bit{{\]}}
+; CHECK: adrp [[RQ2:x[0-9]+]], var_128bit+8
+; CHECK-NEXT: str {{x[0-9]+}}, {{\[}}[[RQ2]], {{#?}}:lo12:var_128bit+8{{\]}}
}
define void @ldst_half() {
More information about the llvm-commits
mailing list