[PATCH] Don't allow unaligned accesses to be generated for V6-M
Benjamin Foster
benjamin.foster at arm.com
Mon Jul 28 02:25:19 PDT 2014
There was a small logic mistake that allowed unaligned accesses to be generated for ARMv6-M targets when they were explicitly enabled (e.g. via -arm-no-strict-align), even though the ARMv6-M Architecture Reference Manual specifies that a fault is always generated when an unaligned access occurs.
http://reviews.llvm.org/D4693
Files:
lib/Target/ARM/ARMSubtarget.cpp
test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
test/CodeGen/Thumb/v6m-unaligned-access.ll
Index: lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- lib/Target/ARM/ARMSubtarget.cpp
+++ lib/Target/ARM/ARMSubtarget.cpp
@@ -326,19 +326,19 @@
(hasV7Ops() && (isTargetLinux() || isTargetNaCl() ||
isTargetNetBSD())) ||
(hasV6Ops() && (isTargetMachO() || isTargetNetBSD()));
- // The one exception is cortex-m0, which despite being v6, does not
- // support unaligned accesses. Rather than make the above boolean
- // expression even more obtuse, just override the value here.
- if (isThumb1Only() && isMClass())
- AllowsUnalignedMem = false;
break;
case StrictAlign:
AllowsUnalignedMem = false;
break;
case NoStrictAlign:
AllowsUnalignedMem = true;
break;
}
+ // The exception is v6m, which does not support unaligned accesses. Rather
+ // than make the above boolean expression even more obtuse, just override the
+ // value here.
+ if (isThumb1Only() && isMClass())
+ AllowsUnalignedMem = false;
switch (IT) {
case DefaultIT:
Index: test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
===================================================================
--- test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
+++ test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
@@ -1,13 +0,0 @@
-; RUN: llc -mtriple=thumbv6m-apple-unknown-macho < %s | FileCheck --check-prefix=V6M %s
-; RUN: llc -mtriple=thumbv7m-apple-unknown-macho < %s | FileCheck --check-prefix=V7M %s
-
-define i32 @split_load(i32* %p) nounwind {
-; V6M-LABEL: split_load
-; V6M: ldrh
-; V6M: ldrh
-; V7M-LABEL: split_load
-; V7M-NOT: ldrh
-; V7M: bx lr
- %val = load i32* %p, align 2
- ret i32 %val
-}
Index: test/CodeGen/Thumb/v6m-unaligned-access.ll
===================================================================
--- test/CodeGen/Thumb/v6m-unaligned-access.ll
+++ test/CodeGen/Thumb/v6m-unaligned-access.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -O3 -verify-machineinstrs -mtriple=thumbv6m-none-linux-gnueabi -mcpu=cortex-m0 -arm-strict-align | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -O3 -verify-machineinstrs -mtriple=thumbv6m-none-linux-gnueabi -mcpu=cortex-m0 -arm-no-strict-align | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -O3 -verify-machineinstrs -mtriple=thumbv7m-none-linux-gnueabi -mcpu=cortex-m3 -arm-strict-align | FileCheck %s --check-prefix=V7M-ALIGNED
+; RUN: llc < %s -O3 -verify-machineinstrs -mtriple=thumbv7m-none-linux-gnueabi -mcpu=cortex-m3 -arm-no-strict-align | FileCheck %s --check-prefix=V7M
+; RUN: llc -O0 -mtriple=thumbv6m-apple-unknown-macho < %s | FileCheck --check-prefix=V6M %s
+; RUN: llc -O0 -mtriple=thumbv7m-apple-unknown-macho < %s | FileCheck --check-prefix=V7M %s
+
+; This test verifies that unaligned accesses are not generated for v6m targets
+
+define i32 @func0(i32* %p) #0 {
+entry:
+ ; V7M-ALIGNED-LABEL: entry
+ ; V7M-ALIGNED: ldrh
+ ; V7M-ALIGNED: ldrh
+
+ ; V6M-LABEL: entry
+ ; V6M: ldrh
+ ; V6M: ldrh
+
+ ; V7M-LABEL: entry
+ ; V7M-NOT: ldrh
+ ; V7M: bx lr
+
+ %0 = load i32* %p, align 2
+
+ ret i32 %0
+}
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D4693.11939.patch
Type: text/x-patch
Size: 3139 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20140728/cc0fef78/attachment.bin>
More information about the llvm-commits
mailing list