[llvm-branch-commits] [llvm-branch] r332158 - Merging r332103:

Tom Stellard via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Fri May 11 16:55:26 PDT 2018


Author: tstellar
Date: Fri May 11 16:55:26 2018
New Revision: 332158

URL: http://llvm.org/viewvc/llvm-project?rev=332158&view=rev
Log:
Merging r332103:

------------------------------------------------------------------------
r332103 | gberry | 2018-05-11 09:25:06 -0700 (Fri, 11 May 2018) | 24 lines

[AArch64] Fix performPostLD1Combine to check for constant lane index.

Summary:
performPostLD1Combine in AArch64ISelLowering looks for a vector
insert_vector_elt of a loaded value that it can optimize into a single
LD1LANE instruction.  The code checking for this pattern did not verify
that the lane index was a constant, which could cause two problems:

- an assert when lowering the LD1LANE ISD node, since it assumes a
  constant operand

- an assert in isel if the lane index value depends on the
  post-incremented base register

Both of these issues are avoided by simply checking that the lane index
is a constant.

Fixes bug 35822.

Reviewers: t.p.northover, javed.absar

Subscribers: rengolin, kristof.beyls, mcrosier, llvm-commits

Differential Revision: https://reviews.llvm.org/D46591
------------------------------------------------------------------------
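For reference, a minimal LLVM IR sketch of the case the new early-exit must
not break (the function name @f_const_lane, the %pout parameter, and the
expected post-indexed "ld1" form are illustrative assumptions, not part of
this commit): with a constant lane index and a post-incremented base the
post-indexed LD1LANE combine is still expected to fire, whereas a
non-constant index now bails out, as the new f2/f3 tests in the diff below
exercise.

; Constant lane index plus a post-incremented base pointer: the combine is
; expected to form a post-indexed LD1LANE (roughly "ld1.s { v0 }[1], [x0], #4").
define <4 x i32> @f_const_lane(i32* %p, <4 x i32> %v, i32** %pout) {
  %val = load i32, i32* %p
  ; Lane index 1 is a ConstantSDNode after lowering, so the new check passes.
  %vec = insertelement <4 x i32> %v, i32 %val, i32 1
  ; Increment the base pointer and keep it live so the post-index form is useful.
  %inc = getelementptr i32, i32* %p, i64 1
  store i32* %inc, i32** %pout
  ret <4 x i32> %vec
}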

Modified:
    llvm/branches/release_60/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/branches/release_60/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll

Modified: llvm/branches/release_60/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_60/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=332158&r1=332157&r2=332158&view=diff
==============================================================================
--- llvm/branches/release_60/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/branches/release_60/lib/Target/AArch64/AArch64ISelLowering.cpp Fri May 11 16:55:26 2018
@@ -9637,6 +9637,15 @@ static SDValue performPostLD1Combine(SDN
   if (LD->getOpcode() != ISD::LOAD)
     return SDValue();
 
+  // The vector lane must be a constant in the LD1LANE opcode.
+  SDValue Lane;
+  if (IsLaneOp) {
+    Lane = N->getOperand(2);
+    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
+    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
+      return SDValue();
+  }
+
   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
   EVT MemVT = LoadSDN->getMemoryVT();
   // Check if memory operand is the same type as the vector element.
@@ -9693,7 +9702,7 @@ static SDValue performPostLD1Combine(SDN
     Ops.push_back(LD->getOperand(0));  // Chain
     if (IsLaneOp) {
       Ops.push_back(Vector);           // The vector to be inserted
-      Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector
+      Ops.push_back(Lane);             // The lane to be inserted in the vector
     }
     Ops.push_back(Addr);
     Ops.push_back(Inc);

Modified: llvm/branches/release_60/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_60/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll?rev=332158&r1=332157&r2=332158&view=diff
==============================================================================
--- llvm/branches/release_60/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll (original)
+++ llvm/branches/release_60/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll Fri May 11 16:55:26 2018
@@ -28,6 +28,28 @@ return:
   ret void
 }
 
+; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
+; LD1LANEPOST ISD nodes with a non-constant lane index.
+define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
+  store i32 %L1, i32 *%p
+  ret <4 x i32> %vret
+}
+
+; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
+define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
+  ret <4 x i32> %vret
+}
+
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
 