[llvm] r332103 - [AArch64] Fix performPostLD1Combine to check for constant lane index.
Geoff Berry via llvm-commits
llvm-commits at lists.llvm.org
Fri May 11 09:25:06 PDT 2018
Author: gberry
Date: Fri May 11 09:25:06 2018
New Revision: 332103
URL: http://llvm.org/viewvc/llvm-project?rev=332103&view=rev
Log:
[AArch64] Fix performPostLD1Combine to check for constant lane index.
Summary:
performPostLD1Combine in AArch64ISelLowering looks for an
insert_vector_elt of a loaded value, which it can optimize into a single
LD1LANE instruction. The code checking for the pattern did not verify
that the lane index was a constant, which could cause two problems:
- an assert when lowering the LD1LANE ISD node, since it assumes a
  constant lane operand
- an assert during isel if the lane index value depends on the
  post-incremented base register
Both of these issues are avoided by simply checking that the lane index
is a constant.
Fixes bug 35822.
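For illustration only (this sketch is not part of the patch, and the
function and value names are made up), the shape of IR the combine is
after looks roughly like this: a load whose result is inserted at a
constant lane while the pointer is also advanced past the loaded
element, which may then be selected as a single post-incremented LD1
lane instruction (e.g. "ld1 { v0.s }[1], [x0], #4"):

; Hypothetical example, not from this commit: the lane index is the
; constant 1, so the combine is allowed to form a post-incremented
; LD1LANE node.
define <4 x i32> @ld1lane_post_sketch(i32* %p, i32** %pout, <4 x i32> %v) {
  %elt = load i32, i32* %p
  %vret = insertelement <4 x i32> %v, i32 %elt, i32 1
  %pinc = getelementptr i32, i32* %p, i64 1
  store i32* %pinc, i32** %pout
  ret <4 x i32> %vret
}

With a non-constant lane index, as in the new @f2/@f3 tests below, the
combine now simply bails out instead of forming an LD1LANEPOST node
that lowering and isel cannot handle.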
Reviewers: t.p.northover, javed.absar
Subscribers: rengolin, kristof.beyls, mcrosier, llvm-commits
Differential Revision: https://reviews.llvm.org/D46591
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=332103&r1=332102&r2=332103&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Fri May 11 09:25:06 2018
@@ -9935,6 +9935,15 @@ static SDValue performPostLD1Combine(SDN
   if (LD->getOpcode() != ISD::LOAD)
     return SDValue();
 
+  // The vector lane must be a constant in the LD1LANE opcode.
+  SDValue Lane;
+  if (IsLaneOp) {
+    Lane = N->getOperand(2);
+    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
+    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
+      return SDValue();
+  }
+
   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
   EVT MemVT = LoadSDN->getMemoryVT();
   // Check if memory operand is the same type as the vector element.
@@ -9991,7 +10000,7 @@ static SDValue performPostLD1Combine(SDN
     Ops.push_back(LD->getOperand(0)); // Chain
     if (IsLaneOp) {
       Ops.push_back(Vector); // The vector to be inserted
-      Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector
+      Ops.push_back(Lane); // The lane to be inserted in the vector
     }
     Ops.push_back(Addr);
     Ops.push_back(Inc);
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll?rev=332103&r1=332102&r2=332103&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll Fri May 11 09:25:06 2018
@@ -28,6 +28,28 @@ return:
   ret void
 }
+; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
+; LD1LANEPOST ISD nodes with a non-constant lane index.
+define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
+  store i32 %L1, i32 *%p
+  ret <4 x i32> %vret
+}
+
+; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
+define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
+  ret <4 x i32> %vret
+}
+
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1