[llvm-commits] [llvm] r144518 - in /llvm/trunk: lib/Target/ARM/ARMFastISel.cpp test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
Chad Rosier
mcrosier at apple.com
Sun Nov 13 20:09:28 PST 2011
Author: mcrosier
Date: Sun Nov 13 22:09:28 2011
New Revision: 144518
URL: http://llvm.org/viewvc/llvm-project?rev=144518&view=rev
Log:
Add support for ARM halfword load/stores and signed byte loads with negative
offsets.
rdar://10412592
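
For context, the LDRH/STRH/LDRSB forms use ARM addressing mode 3, which carries an 8-bit offset magnitude plus a separate add/subtract flag; in the machine-operand immediate that flag lands in bit 8. A minimal standalone sketch of the packing the new MIB.addImm(Imm) operands perform (the helper name is illustrative, not part of the patch):

  #include <cassert>

  // Illustrative helper, not from the patch: pack an AM3 offset the same way
  // the new code in ARMFastISel.cpp below does. Bit 8 selects subtract; bits
  // [7:0] hold the magnitude, so only offsets in [-255, 255] fit.
  static unsigned encodeAM3Offset(int Offset) {
    assert(Offset >= -255 && Offset <= 255 && "outside the +/-imm8 range");
    return (Offset < 0) ? (0x100u | unsigned(-Offset)) : unsigned(Offset);
  }

  int main() {
    assert(encodeAM3Offset(16)   == 0x010); // ldrh r0, [r0, #16]
    assert(encodeAM3Offset(-16)  == 0x110); // ldrh r0, [r0, #-16]   (test t1)
    assert(encodeAM3Offset(-255) == 0x1ff); // ldrsb r0, [r0, #-255] (test t14)
    return 0;
  }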
Modified:
llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
Modified: llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMFastISel.cpp?rev=144518&r1=144517&r2=144518&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMFastISel.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMFastISel.cpp Sun Nov 13 22:09:28 2011
@@ -875,8 +875,7 @@
needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
else
// ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
- // FIXME: Negative offsets require special handling.
- needsLowering = (Addr.Offset > 255 || Addr.Offset < 0);
+ needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
break;
case MVT::f32:
case MVT::f64:
@@ -933,18 +932,26 @@
MIB.addFrameIndex(FI);
// ARM halfword load/stores and signed byte loads need an additional operand.
- if (useAM3) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
MIB.addMemOperand(MMO);
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
// ARM halfword load/stores and signed byte loads need an additional operand.
- if (useAM3) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
}
AddOptionalDefs(MIB);
}
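
Both branches above (the frame-index path and the register-base path) apply the same packing; whether folding happens at all is decided by the relaxed range check in the first hunk, which now accepts the full +/-imm8 window instead of rejecting every negative offset. A small sketch of that decision, again with illustrative names rather than the actual FastISel code:

  #include <cstdio>

  // Illustrative predicate: with the FIXME gone, any offset in [-255, 255]
  // folds into the AM3 immediate, and only values outside that window still
  // force the address to be materialized into the base register first
  // (the mvn/add sequence kept for t15 in the test below).
  static bool am3OffsetNeedsLowering(int Offset) {
    return Offset > 255 || Offset < -255;
  }

  int main() {
    const int Offsets[] = {-16, -255, -256};
    for (int Offset : Offsets)
      std::printf("%5d -> %s\n", Offset,
                  am3OffsetNeedsLowering(Offset) ? "lower address first"
                                                 : "fold into AM3 offset");
    return 0;
  }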
Modified: llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll?rev=144518&r1=144517&r2=144518&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll Sun Nov 13 22:09:28 2011
@@ -1,48 +1,33 @@
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
; rdar://10418009
-; TODO: We currently don't support ldrh/strh for negative offsets. Likely a
-; rare case, but possibly worth pursuing. Comments above the test case show
-; what could be selected.
-
-; ldrh r0, [r0, #-16]
define zeroext i16 @t1(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t1
%add.ptr = getelementptr inbounds i16* %a, i64 -8
%0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #15
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-16]
ret i16 %0
}
-; ldrh r0, [r0, #-32]
define zeroext i16 @t2(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t2
%add.ptr = getelementptr inbounds i16* %a, i64 -16
%0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #31
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-32]
ret i16 %0
}
-; ldrh r0, [r0, #-254]
define zeroext i16 @t3(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t3
%add.ptr = getelementptr inbounds i16* %a, i64 -127
%0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #253
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-254]
ret i16 %0
}
-; mvn r1, #255
-; ldrh r0, [r0, r1]
define zeroext i16 @t4(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t4
@@ -91,15 +76,12 @@
ret i16 %0
}
-; strh r1, [r0, #-16]
define void @t9(i16* nocapture %a) nounwind uwtable ssp {
entry:
; ARM: t9
%add.ptr = getelementptr inbounds i16* %a, i64 -8
store i16 0, i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #15
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: strh r{{[1-9]}}, [r0]
+; ARM: strh r1, [r0, #-16]
ret void
}
@@ -136,3 +118,32 @@
; ARM: strh r{{[1-9]}}, [r0]
ret void
}
+
+define signext i8 @t13(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t13
+ %add.ptr = getelementptr inbounds i8* %a, i64 -8
+ %0 = load i8* %add.ptr, align 2
+; ARM: ldrsb r0, [r0, #-8]
+ ret i8 %0
+}
+
+define signext i8 @t14(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t14
+ %add.ptr = getelementptr inbounds i8* %a, i64 -255
+ %0 = load i8* %add.ptr, align 2
+; ARM: ldrsb r0, [r0, #-255]
+ ret i8 %0
+}
+
+define signext i8 @t15(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t15
+ %add.ptr = getelementptr inbounds i8* %a, i64 -256
+ %0 = load i8* %add.ptr, align 2
+; ARM: mvn r{{[1-9]}}, #255
+; ARM: add r0, r0, r{{[1-9]}}
+; ARM: ldrsb r0, [r0]
+ ret i8 %0
+}
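
The new checks can be reproduced locally by running the test's RUN line directly, substituting the file path for %s, e.g. llc < fast-isel-ldrh-strh-arm.ll -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck fast-isel-ldrh-strh-arm.ll --check-prefix=ARM. Tests t13 and t14 now select ldrsb with negative immediates, while t15 (offset -256, just outside the +/-imm8 range) still goes through the mvn/add fallback.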