[llvm] r269263 - [AArch64] Add support for unscaled narrow stores in getUsefulBitsForUse.
Chad Rosier via llvm-commits
llvm-commits at lists.llvm.org
Wed May 11 18:42:01 PDT 2016
Author: mcrosier
Date: Wed May 11 20:42:01 2016
New Revision: 269263
URL: http://llvm.org/viewvc/llvm-project?rev=269263&view=rev
Log:
[AArch64] Add support for unscaled narrow stores in getUsefulBitsForUse.

getUsefulBitsForUse already knew that a value whose only use is a scaled
narrow store (STRBBui/STRHHui) has only its low 8 or 16 bits live. Teach
it the same for the unscaled forms (STURBBi/STURHHi), so redundant
masking of the dead high bits can be removed for sturb/sturh as well.
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=269263&r1=269262&r2=269263&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Wed May 11 20:42:01 2016
@@ -1851,12 +1851,14 @@ static void getUsefulBitsForUse(SDNode *
return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
case AArch64::STRBBui:
+ case AArch64::STURBBi:
if (UserNode->getOperand(0) != Orig)
return;
UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
return;
case AArch64::STRHHui:
+ case AArch64::STURHHi:
if (UserNode->getOperand(0) != Orig)
return;
UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
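For reference, the cases above simply intersect the incoming useful-bits
mask with the bits the narrow store actually writes. A minimal standalone
sketch of that intersection using llvm::APInt (illustration only, not the
patched function itself):

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  int main() {
    using llvm::APInt;

    // Assume a 32-bit value with every bit initially considered useful.
    APInt UsefulBits = APInt::getAllOnesValue(32);

    // A byte store (STRBBui or, with this patch, STURBBi) reads only the
    // low 8 bits of its value operand, so everything above bit 7 is dead.
    UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);

    assert(UsefulBits == APInt(32, 0xff));
    return 0;
  }

Once the mask is narrowed this way, any instruction that only defines the
dead high bits (such as the and of the preserved field in the tests below)
becomes removable.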
Modified: llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll?rev=269263&r1=269262&r2=269263&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll Wed May 11 20:42:01 2016
@@ -273,3 +273,41 @@ entry:
store i16 %trunc, i16* %ptr16
ret void
}
+
+define void @test_nouseful_sturb(i32* %ptr32, i8* %ptr8, i32 %x) {
+entry:
+; CHECK-LABEL: @test_nouseful_sturb
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NOT: and {{w[0-9]+}}, {{w[0-9]+}}, #0xf8
+; CHECK-NEXT: bfxil [[REG1]], w2, #16, #3
+; CHECK-NEXT: sturb [[REG1]],
+; CHECK-NEXT: ret
+ %0 = load i32, i32* %ptr32, align 8
+ %and = and i32 %0, -8
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 7
+ %or = or i32 %and, %and1
+ %trunc = trunc i32 %or to i8
+ %gep = getelementptr i8, i8* %ptr8, i64 -1
+ store i8 %trunc, i8* %gep
+ ret void
+}
+
+define void @test_nouseful_sturh(i32* %ptr32, i16* %ptr16, i32 %x) {
+entry:
+; CHECK-LABEL: @test_nouseful_sturh
+; CHECK: ldr [[REG1:w[0-9]+]],
+; CHECK-NOT: and {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0
+; CHECK-NEXT: bfxil [[REG1]], w2, #16, #4
+; CHECK-NEXT: sturh [[REG1]],
+; CHECK-NEXT: ret
+ %0 = load i32, i32* %ptr32, align 8
+ %and = and i32 %0, -16
+ %shr = lshr i32 %x, 16
+ %and1 = and i32 %shr, 15
+ %or = or i32 %and, %and1
+ %trunc = trunc i32 %or to i16
+ %gep = getelementptr i16, i16* %ptr16, i64 -1
+ store i16 %trunc, i16* %gep
+ ret void
+}
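The two tests correspond roughly to the following source-level patterns;
the function and parameter names here are hypothetical, and the negative
offsets are what force selection of the unscaled sturb/sturh forms:

  // Hypothetical C++ equivalent of test_nouseful_sturb: insert a 3-bit
  // field from x into a byte stored at a negative (unscaled) offset.
  void sturb_pattern(unsigned *ptr32, unsigned char *ptr8, unsigned x) {
    unsigned v = (*ptr32 & ~7u) | ((x >> 16) & 7u);
    ptr8[-1] = (unsigned char)v; // negative offset -> sturb
  }

  // Hypothetical C++ equivalent of test_nouseful_sturh, with a 4-bit
  // field and a halfword store.
  void sturh_pattern(unsigned *ptr32, unsigned short *ptr16, unsigned x) {
    unsigned v = (*ptr32 & ~15u) | ((x >> 16) & 15u);
    ptr16[-1] = (unsigned short)v; // negative offset -> sturh
  }

In both tests the and in the input IR only clears bits that are either
overwritten by the bfxil or lie above the stored width; once
getUsefulBitsForUse knows the sturb/sturh leaves the high bits dead, that
and can be dropped, which is what the CHECK-NOT lines verify.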