[llvm-branch-commits] [llvm-branch] r164491 - in /llvm/branches/R600: lib/Target/ARM/ARMFastISel.cpp test/CodeGen/ARM/fast-isel.ll

Tom Stellard thomas.stellard at amd.com
Mon Sep 24 08:53:01 PDT 2012


Author: tstellar
Date: Mon Sep 24 10:51:20 2012
New Revision: 164491

URL: http://llvm.org/viewvc/llvm-project?rev=164491&view=rev
Log:
[fast-isel] Fall back to SelectionDAG isel if we require strict alignment for
non-aligned i32 loads/stores.
rdar://12304911

Modified:
    llvm/branches/R600/lib/Target/ARM/ARMFastISel.cpp
    llvm/branches/R600/test/CodeGen/ARM/fast-isel.ll

Modified: llvm/branches/R600/lib/Target/ARM/ARMFastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/R600/lib/Target/ARM/ARMFastISel.cpp?rev=164491&r1=164490&r2=164491&view=diff
==============================================================================
--- llvm/branches/R600/lib/Target/ARM/ARMFastISel.cpp (original)
+++ llvm/branches/R600/lib/Target/ARM/ARMFastISel.cpp Mon Sep 24 10:51:20 2012
@@ -1036,6 +1036,9 @@
       RC = &ARM::GPRRegClass;
       break;
     case MVT::i32:
+      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+        return false;
+
       if (isThumb2) {
         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
           Opc = ARM::t2LDRi8;
@@ -1156,6 +1159,9 @@
       }
       break;
     case MVT::i32:
+      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+        return false;
+
       if (isThumb2) {
         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
           StrOpc = ARM::t2STRi8;

Modified: llvm/branches/R600/test/CodeGen/ARM/fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/R600/test/CodeGen/ARM/fast-isel.ll?rev=164491&r1=164490&r2=164491&view=diff
==============================================================================
--- llvm/branches/R600/test/CodeGen/ARM/fast-isel.ll (original)
+++ llvm/branches/R600/test/CodeGen/ARM/fast-isel.ll Mon Sep 24 10:51:20 2012
@@ -268,3 +268,39 @@
   %0 = load i16* %x, align 1
   ret i16 %0
 }
+
+define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i32_store
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+
+; THUMB-STRICT-ALIGN: @unaligned_i32_store
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+
+  store i32 %x, i32* %y, align 1
+  ret void
+}
+
+define i32 @unaligned_i32_load(i32* %x) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i32_load
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+
+; THUMB-STRICT-ALIGN: @unaligned_i32_load
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+
+  %0 = load i32* %x, align 1
+  ret i32 %0
+}





More information about the llvm-branch-commits mailing list