[llvm] 194b080 - [DAG] LoadedSlice::canMergeExpensiveCrossRegisterBankCopy - replace getABITypeAlign with allowsMemoryAccess (PR45116)

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Tue Aug 24 07:28:47 PDT 2021


Author: Simon Pilgrim
Date: 2021-08-24T15:28:30+01:00
New Revision: 194b08000c1c3dee322acfac9fd83e055f6bc557

URL: https://github.com/llvm/llvm-project/commit/194b08000c1c3dee322acfac9fd83e055f6bc557
DIFF: https://github.com/llvm/llvm-project/commit/194b08000c1c3dee322acfac9fd83e055f6bc557.diff

LOG: [DAG] LoadedSlice::canMergeExpensiveCrossRegisterBankCopy - replace getABITypeAlign with allowsMemoryAccess (PR45116)

One of the cases identified in PR45116 - we don't need to limit load combines to ABI alignment. Instead we can use allowsMemoryAccess, which tests against getABITypeAlign but also falls back to allowsMisalignedMemoryAccesses to check whether the target permits (fast) misaligned memory loads.
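
For reference, the decision described above behaves roughly as in the standalone C++ sketch below. This is a simplified approximation, not the real LLVM implementation: MemAccessQuery and targetAllowsMisaligned are hypothetical stand-ins for EVT/Align/DataLayout and the per-target TargetLowering::allowsMisalignedMemoryAccesses hook.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-ins for the LLVM types involved (the real code
    // uses EVT, Align, the DataLayout and MachineMemOperand::Flags).
    struct MemAccessQuery {
      uint64_t AccessAlign;  // actual alignment of the access
      uint64_t ABITypeAlign; // DataLayout ABI alignment for the result type
    };

    // Stand-in for the per-target misaligned-access hook. Illustrative
    // policy only: the access is tolerated, but reported "fast" only when
    // it is at least 4-byte aligned.
    static bool targetAllowsMisaligned(const MemAccessQuery &Q, bool *Fast) {
      if (Fast)
        *Fast = Q.AccessAlign >= 4;
      return true;
    }

    // Sketch of the fallback the log message describes: an access aligned
    // to at least the ABI alignment is always allowed (and fast);
    // otherwise the target hook decides.
    static bool allowsMemoryAccessSketch(const MemAccessQuery &Q, bool *Fast) {
      if (Q.AccessAlign >= Q.ABITypeAlign) {
        if (Fast)
          *Fast = true;
        return true;
      }
      return targetAllowsMisaligned(Q, Fast);
    }

    int main() {
      bool Fast = false;
      MemAccessQuery Q{/*AccessAlign=*/4, /*ABITypeAlign=*/16};
      bool Allowed = allowsMemoryAccessSketch(Q, &Fast);
      // Mirrors the patch below: only merge the slice when the access is
      // both allowed and fast.
      std::printf("can merge slice: %s\n", Allowed && Fast ? "yes" : "no");
      return 0;
    }

Note that the combiner change below requires both conditions: a target may accept a misaligned access yet report it as slow, and in that case the slice is still not merged.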

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/X86/load-partial.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9f11ebcbc22e..2ba25c77dc6e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16257,11 +16257,12 @@ struct LoadedSlice {
       return false;
 
     // Check if it will be merged with the load.
-    // 1. Check the alignment constraint.
-    Align RequiredAlignment = DAG->getDataLayout().getABITypeAlign(
-        ResVT.getTypeForEVT(*DAG->getContext()));
-
-    if (RequiredAlignment > getAlign())
+    // 1. Check the alignment / fast memory access constraint.
+    bool IsFast = false;
+    if (!TLI.allowsMemoryAccess(*DAG->getContext(), DAG->getDataLayout(), ResVT,
+                                Origin->getAddressSpace(), getAlign(),
+                                Origin->getMemOperand()->getFlags(), &IsFast) ||
+        !IsFast)
       return false;
 
     // 2. Check that the load is a legal operation for that type.

diff --git a/llvm/test/CodeGen/X86/load-partial.ll b/llvm/test/CodeGen/X86/load-partial.ll
index faab192d220e..3ebb3687ea7f 100644
--- a/llvm/test/CodeGen/X86/load-partial.ll
+++ b/llvm/test/CodeGen/X86/load-partial.ll
@@ -280,24 +280,16 @@ define <4 x float> @load_float4_float3_trunc_0123_unaligned(<4 x float>* nocaptu
 ;
 ; SSE41-LABEL: load_float4_float3_trunc_0123_unaligned:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movq 8(%rdi), %rax
-; SSE41-NEXT:    movd %eax, %xmm1
-; SSE41-NEXT:    shrq $32, %rax
-; SSE41-NEXT:    movd %eax, %xmm2
 ; SSE41-NEXT:    movups (%rdi), %xmm0
-; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_float4_float3_trunc_0123_unaligned:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    movq 8(%rdi), %rax
-; AVX-NEXT:    vmovd %eax, %xmm0
-; AVX-NEXT:    shrq $32, %rax
-; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    vmovups (%rdi), %xmm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1],xmm0[0],xmm2[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX-NEXT:    vmovups (%rdi), %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; AVX-NEXT:    retq
   %2 = bitcast <4 x float>* %0 to i64*
   %3 = load i64, i64* %2, align 1

