[PATCH] D116442: [AArch64ISelLowering] Don't look through scalable extract_subvector when optimising DUPLANE.

Paul Walker via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 31 06:27:32 PST 2021


paulwalker-arm created this revision.
Herald added subscribers: hiraditya, kristof.beyls.
paulwalker-arm requested review of this revision.
Herald added a project: LLVM.
Herald added a subscriber: llvm-commits.

When constructDup is passed an extract_subvector it tries to use
extract_subvector's operand directly when creating the DUPLANE.
This is invalid when extracting from a scalable vector because the
necessary DUPLANE ISel patterns do not exist.

NOTE: This patch is an update to https://reviews.llvm.org/D110524
that originally fixed this but introduced a bug when the result
VT is 64 bits. I've restructured the code so the critical final
else block is entered when necessary.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D116442

Files:
  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll


Index: llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
+++ llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
@@ -26,3 +26,27 @@
   store <16 x i32> %2, <16 x i32>* %arg1, align 256
   ret <4 x i32> %shvec
 }
+
+define <2 x i32> @test2(<16 x i32>* %arg1, <16 x i32>* %arg2) {
+; CHECK-LABEL: test2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    add z2.s, p0/m, z2.s, z2.s
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #24
+; CHECK-NEXT:    add z1.s, p0/m, z1.s, z1.s
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <16 x i32>, <16 x i32>* %arg1, align 256
+  %1 = load <16 x i32>, <16 x i32>* %arg2, align 256
+  %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <2 x i32> <i32 14, i32 14>
+  %2 = add <16 x i32> %0, %0
+  store <16 x i32> %2, <16 x i32>* %arg1, align 256
+  ret <2 x i32> %shvec
+}
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9631,14 +9631,13 @@
   MVT CastVT;
   if (getScaledOffsetDup(V, Lane, CastVT)) {
     V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
-  } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+  } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+             V.getOperand(0).getValueType().isFixedLengthVector() &&
+             V.getOperand(0).getValueType().getFixedSizeInBits() <= 128) {
     // The lane is incremented by the index of the extract.
     // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
-    auto VecVT = V.getOperand(0).getValueType();
-    if (VecVT.isFixedLengthVector() && VecVT.getFixedSizeInBits() <= 128) {
-      Lane += V.getConstantOperandVal(1);
-      V = V.getOperand(0);
-    }
+    Lane += V.getConstantOperandVal(1);
+    V = V.getOperand(0);
   } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
     // The lane is decremented if we are splatting from the 2nd operand.
     // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D116442.396784.patch
Type: text/x-patch
Size: 2524 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20211231/c6a4d31d/attachment.bin>


More information about the llvm-commits mailing list