[llvm] r246658 - [x86] fix allowsMisalignedMemoryAccesses() for 8-byte and smaller accesses

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 2 08:42:50 PDT 2015


Author: spatel
Date: Wed Sep  2 10:42:49 2015
New Revision: 246658

URL: http://llvm.org/viewvc/llvm-project?rev=246658&view=rev
Log:
[x86] fix allowsMisalignedMemoryAccesses() for 8-byte and smaller accesses

This is a continuation of the fix from:
http://reviews.llvm.org/D10662

and discussion in:
http://reviews.llvm.org/D12154

Here, we distinguish slow unaligned SSE (128-bit) accesses from slow unaligned
scalar (64-bit and under) accesses. Other lowering code (e.g., getOptimalMemOpType)
assumes that unaligned scalar accesses are always OK, so this changes
allowsMisalignedMemoryAccesses() to match that behavior.

Differential Revision: http://reviews.llvm.org/D12543
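
[Editor's note: for readers skimming the patch, here is a minimal standalone sketch of the
decision logic after this change. The SubtargetModel struct and the function name
isMisalignedAccessAllowed() are hypothetical stand-ins for illustration only; the real code
queries Subtarget->isUnalignedMem16Slow() / isUnalignedMem32Slow() as shown in the diff below.]

    #include <iostream>

    // Hypothetical stand-in for the relevant X86 subtarget feature bits.
    struct SubtargetModel {
      bool UnalignedMem16Slow; // slow unaligned 128-bit (SSE) accesses
      bool UnalignedMem32Slow; // slow unaligned 256-bit (AVX) accesses
    };

    // Models the post-patch behavior of allowsMisalignedMemoryAccesses():
    // the access is always *allowed*; *Fast only reports whether it is cheap.
    bool isMisalignedAccessAllowed(unsigned SizeInBits, const SubtargetModel &ST,
                                   bool *Fast) {
      if (Fast) {
        switch (SizeInBits) {
        default:
          // 8-byte and under are always assumed to be fast.
          *Fast = true;
          break;
        case 128:
          *Fast = !ST.UnalignedMem16Slow;
          break;
        case 256:
          *Fast = !ST.UnalignedMem32Slow;
          break;
        }
      }
      // Misaligned accesses of any size are always allowed.
      return true;
    }

    int main() {
      // A CPU with slow unaligned vector accesses (assumed example).
      SubtargetModel SlowVec{/*UnalignedMem16Slow=*/true, /*UnalignedMem32Slow=*/true};
      bool Fast = false;
      isMisalignedAccessAllowed(64, SlowVec, &Fast);
      std::cout << "64-bit misaligned fast?  " << Fast << '\n'; // 1: scalar is fine
      isMisalignedAccessAllowed(128, SlowVec, &Fast);
      std::cout << "128-bit misaligned fast? " << Fast << '\n'; // 0: avoid unaligned SSE
    }

The point of the change is visible in the 64-bit query: before the patch, a subtarget with
slow unaligned SSE would also report scalar misaligned accesses as slow, contradicting what
getOptimalMemOpType assumes when lowering memcpy/memset.
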

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/memcpy-2.ll
    llvm/trunk/test/CodeGen/X86/pr11985.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=246658&r1=246657&r2=246658&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Sep  2 10:42:49 2015
@@ -1923,13 +1923,21 @@ X86TargetLowering::allowsMisalignedMemor
                                                   unsigned,
                                                   bool *Fast) const {
   if (Fast) {
-    if (VT.getSizeInBits() == 256)
-      *Fast = !Subtarget->isUnalignedMem32Slow();
-    else
-      // FIXME: We should always return that 8-byte and under accesses are fast.
-      // That is what other x86 lowering code assumes.
+    switch (VT.getSizeInBits()) {
+    default:
+      // 8-byte and under are always assumed to be fast.
+      *Fast = true;
+      break;
+    case 128:
       *Fast = !Subtarget->isUnalignedMem16Slow();
+      break;
+    case 256:
+      *Fast = !Subtarget->isUnalignedMem32Slow();
+      break;
+    // TODO: What about AVX-512 (512-bit) accesses?
+    }
   }
+  // Misaligned accesses of any size are always allowed.
   return true;
 }
 

Modified: llvm/trunk/test/CodeGen/X86/memcpy-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcpy-2.ll?rev=246658&r1=246657&r2=246658&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcpy-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memcpy-2.ll Wed Sep  2 10:42:49 2015
@@ -5,15 +5,6 @@
 ; RUN: llc < %s                 -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=X86-64
 ; RUN: llc < %s                 -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s -check-prefix=NHM_64
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "t4" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
-;;;
-;;; Is either of the sequences ideal?
-;;; Is the ideal code being generated for all CPU models?
-
 
 @.str = internal constant [25 x i8] c"image\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
 @.str2 = internal constant [30 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 4
@@ -190,13 +181,18 @@ entry:
 ; NOSSE: movl $2021161080
 ; NOSSE: movl $2021161080
 
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
+;;;
+;;; Is either of the sequences ideal?
+
 ; X86-64-LABEL: t4:
-; X86-64: movabsq $8680820740569200760, %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movw $120
-; X86-64: movl $2021161080
+; X86-64: movabsq  $33909456017848440, %rax ## imm = 0x78787878787878
+; X86-64: movq     %rax, -10(%rsp)
+; X86-64: movabsq  $8680820740569200760, %rax ## imm = 0x7878787878787878
+; X86-64: movq     %rax, -16(%rsp)
+; X86-64: movq     %rax, -24(%rsp)
+; X86-64: movq     %rax, -32(%rsp)
 
 ; NHM_64-LABEL: t4:
 ; NHM_64: movups   _.str2+14(%rip), %xmm0

Modified: llvm/trunk/test/CodeGen/X86/pr11985.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr11985.ll?rev=246658&r1=246657&r2=246658&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr11985.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr11985.ll Wed Sep  2 10:42:49 2015
@@ -1,26 +1,20 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=prescott | FileCheck %s --check-prefix=PRESCOTT
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "foo" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
 ;;;
 ;;; Is either of these sequences ideal? 
-;;; Is the ideal code being generated for all CPU models?
 
 define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
 ; PRESCOTT-LABEL: foo:
 ; PRESCOTT:       # BB#0: # %entry
-; PRESCOTT-NEXT:    movw .Ltmp0+20(%rip), %ax
-; PRESCOTT-NEXT:    movw %ax, 20(%rdi)
-; PRESCOTT-NEXT:    movl .Ltmp0+16(%rip), %eax
-; PRESCOTT-NEXT:    movl %eax, 16(%rdi)
-; PRESCOTT-NEXT:    movq .Ltmp0+8(%rip), %rax
-; PRESCOTT-NEXT:    movq %rax, 8(%rdi)
-; PRESCOTT-NEXT:    movq .Ltmp0(%rip), %rax
-; PRESCOTT-NEXT:    movq %rax, (%rdi)
+; PRESCOTT-NEXT:    movq   .Ltmp0+14(%rip), %rax
+; PRESCOTT-NEXT:    movq   %rax, 14(%rdi)
+; PRESCOTT-NEXT:    movq   .Ltmp0+8(%rip), %rax
+; PRESCOTT-NEXT:    movq   %rax, 8(%rdi)
+; PRESCOTT-NEXT:    movq   .Ltmp0(%rip), %rax
+; PRESCOTT-NEXT:    movq   %rax, (%rdi)
 ;
 ; NEHALEM-LABEL: foo:
 ; NEHALEM:       # BB#0: # %entry
