[llvm] [SVE] Ensure SVE call operands passed via memory are correctly initialised. (PR #66070)

via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 12 04:23:46 PDT 2023


llvmbot wrote:

@llvm/pr-subscribers-backend-aarch64

<details>
<summary>Changes</summary>

The stores created when passing operands via memory don't typically
maintain the chain, because they can be performed in any order.
Instead, a new chain is created (via a TokenFactor) from all the
collated stores. SVE parameters passed via memory don't follow this
idiom and instead thread the chain through each store, which
unfortunately can result in those stores being incorrectly removed
as dead code when the chain is recreated.

This patch brings the SVE side in line with the non-SVE side to
ensure no stores are lost, whilst also allowing greater flexibility
when ordering the stores.
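
For context, a minimal sketch of the non-SVE idiom this patch adopts,
using the names from the surrounding LowerCall code (not compilable in
isolation; the vscale-based pointer increment between tuple parts is
elided):

<pre>
// Each store depends on the incoming Chain but does not replace it,
// so the stores stay independent and freely reorderable.
SmallVector<SDValue, 8> MemOpChains;
while (NumParts) {
  SDValue Store = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
  MemOpChains.push_back(Store);
  --NumParts;
  // ... advance Ptr by a vscale-based increment for the next part ...
}
// A single TokenFactor later makes the call chain depend on every
// collected store, so none of them can be dropped as dead code.
if (!MemOpChains.empty())
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
</pre>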

--
Full diff: https://github.com/llvm/llvm-project/pull/66070.diff

3 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+3-1) 
- (modified) llvm/test/CodeGen/AArch64/arm64ec-varargs.ll (+15-15) 
- (modified) llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll (+14-10) 


<pre>
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fb25dd1d77553fb..9cf077c807c7bc6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7499,7 +7499,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
       // Ensure we generate all stores for each tuple part, whilst updating the
       // pointer after each store correctly using vscale.
       while (NumParts) {
-        Chain = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
+        SDValue Store = DAG.getStore(Chain, DL, OutVals[i], Ptr, MPI);
+        MemOpChains.push_back(Store);
+
         NumParts--;
         if (NumParts > 0) {
           SDValue BytesIncrement;
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll b/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
index 3950a3026769c89..957ed995ee3bfc0 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
@@ -35,15 +35,15 @@ define void @varargs_caller() nounwind {
 ; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    mov x4, sp
 ; CHECK-NEXT:    add x8, sp, #16
-; CHECK-NEXT:    mov x9, #4617315517961601024
-; CHECK-NEXT:    mov x0, #4607182418800017408
-; CHECK-NEXT:    mov w1, #2
-; CHECK-NEXT:    mov x2, #4613937818241073152
-; CHECK-NEXT:    mov w3, #4
-; CHECK-NEXT:    mov w5, #16
+; CHECK-NEXT:    mov x9, #4617315517961601024 // =0x4014000000000000
+; CHECK-NEXT:    mov x0, #4607182418800017408 // =0x3ff0000000000000
+; CHECK-NEXT:    mov w1, #2 // =0x2
+; CHECK-NEXT:    mov x2, #4613937818241073152 // =0x4008000000000000
+; CHECK-NEXT:    mov w3, #4 // =0x4
+; CHECK-NEXT:    mov w5, #16 // =0x10
 ; CHECK-NEXT:    stp xzr, x30, [sp, #24] // 8-byte Folded Spill
-; CHECK-NEXT:    stp x8, xzr, [sp, #8]
-; CHECK-NEXT:    str x9, [sp]
+; CHECK-NEXT:    stp x9, x8, [sp]
+; CHECK-NEXT:    str xzr, [sp, #16]
 ; CHECK-NEXT:    bl varargs_callee
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #48
@@ -70,17 +70,17 @@ define void @varargs_many_argscalleer() nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
-; CHECK-NEXT:    mov x4, sp
-; CHECK-NEXT:    mov x8, #4618441417868443648
+; CHECK-NEXT:    mov x8, #4618441417868443648 // =0x4018000000000000
 ; CHECK-NEXT:    add x9, sp, #16
 ; CHECK-NEXT:    add x3, sp, #32
-; CHECK-NEXT:    mov x0, #4607182418800017408
-; CHECK-NEXT:    mov x1, #4611686018427387904
-; CHECK-NEXT:    mov x2, #4613937818241073152
-; CHECK-NEXT:    mov w5, #16
+; CHECK-NEXT:    mov x0, #4607182418800017408 // =0x3ff0000000000000
+; CHECK-NEXT:    mov x1, #4611686018427387904 // =0x4000000000000000
+; CHECK-NEXT:    mov x2, #4613937818241073152 // =0x4008000000000000
+; CHECK-NEXT:    mov x4, sp
+; CHECK-NEXT:    mov w5, #16 // =0x10
 ; CHECK-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT:    stp q0, q0, [sp, #16]
 ; CHECK-NEXT:    stp x9, x8, [sp]
+; CHECK-NEXT:    stp q0, q0, [sp, #16]
 ; CHECK-NEXT:    bl varargs_many_argscallee
 ; CHECK-NEXT:    ldr x30, [sp, #48] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #64
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index c8748052a9f263e..b7505625cde9773 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -18,10 +18,10 @@ define float @foo1(ptr %x0, ptr %x1, ptr %x2) nounwind {
 ; CHECK-NEXT:    ld4d { z16.d - z19.d }, p0/z, [x1]
 ; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x2]
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    st1d { z16.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z17.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT:    st1d { z18.d }, p0, [sp, #2, mul vl]
 ; CHECK-NEXT:    st1d { z19.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT:    st1d { z18.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT:    st1d { z17.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT:    st1d { z16.d }, p0, [sp]
 ; CHECK-NEXT:    bl callee1
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
@@ -73,10 +73,10 @@ define float @foo2(ptr %x0, ptr %x1) nounwind {
 ; CHECK-NEXT:    ld4d { z16.d - z19.d }, p0/z, [x1]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w1, #1 // =0x1
-; CHECK-NEXT:    st1d { z16.d }, p0, [x8]
-; CHECK-NEXT:    st1d { z17.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z19.d }, p0, [x8, #3, mul vl]
 ; CHECK-NEXT:    st1d { z18.d }, p0, [x8, #2, mul vl]
-; CHECK-NEXT:    st1d { z19.d }, p0, [x9, #3, mul vl]
+; CHECK-NEXT:    st1d { z17.d }, p0, [x8, #1, mul vl]
+; CHECK-NEXT:    st1d { z16.d }, p0, [x9]
 ; CHECK-NEXT:    str x8, [sp]
 ; CHECK-NEXT:    bl callee2
 ; CHECK-NEXT:    addvl sp, sp, #4
@@ -121,9 +121,9 @@ define float @foo3(ptr %x0, ptr %x1, ptr %x2) nounwind {
 ; CHECK-NEXT:    ld3d { z16.d - z18.d }, p0/z, [x1]
 ; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x2]
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    st1d { z16.d }, p0, [sp]
-; CHECK-NEXT:    st1d { z17.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    st1d { z18.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT:    st1d { z17.d }, p0, [sp, #1, mul vl]
+; CHECK-NEXT:    st1d { z16.d }, p0, [sp]
 ; CHECK-NEXT:    bl callee3
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
@@ -704,17 +704,21 @@ define void @verify_all_operands_are_initialised() {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    movi d0, #0000000000000000
 ; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fmov z16.s, #9.00000000
 ; CHECK-NEXT:    mov w8, #1090519040 // =0x41000000
+; CHECK-NEXT:    add x0, sp, #16
 ; CHECK-NEXT:    fmov s2, #2.00000000
 ; CHECK-NEXT:    fmov s3, #3.00000000
-; CHECK-NEXT:    add x0, sp, #16
+; CHECK-NEXT:    add x9, sp, #16
 ; CHECK-NEXT:    fmov s4, #4.00000000
 ; CHECK-NEXT:    fmov s5, #5.00000000
-; CHECK-NEXT:    str w8, [sp]
 ; CHECK-NEXT:    fmov s6, #6.00000000
 ; CHECK-NEXT:    fmov s7, #7.00000000
+; CHECK-NEXT:    st1w { z16.s }, p0, [x9]
+; CHECK-NEXT:    str w8, [sp]
 ; CHECK-NEXT:    bl func_f8_and_v0_passed_via_memory
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    add sp, sp, #16
</pre>

</details>

https://github.com/llvm/llvm-project/pull/66070

