[llvm] 4325fd7 - [AArch64ISelLowering] Don't look through scalable extract_subvector when optimising DUPLANE.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 5 03:59:03 PST 2022


Author: Paul Walker
Date: 2022-01-05T11:56:59Z
New Revision: 4325fd7402bf41bbe6c944cfb84da95c4d8e875d

URL: https://github.com/llvm/llvm-project/commit/4325fd7402bf41bbe6c944cfb84da95c4d8e875d
DIFF: https://github.com/llvm/llvm-project/commit/4325fd7402bf41bbe6c944cfb84da95c4d8e875d.diff

LOG: [AArch64ISelLowering] Don't look through scalable extract_subvector when optimising DUPLANE.

When constructDup is passed an extract_subvector it tries to use
extract_subvector's operand directly when creating the DUPLANE.
This is invalid when extracting from a scalable vector because the
necessary DUPLANE ISel patterns do not exist.

NOTE: This patch is an update to https://reviews.llvm.org/D110524
that originally fixed this but introduced a bug when the result
VT is 64 bits. I've restructured the code so the critical final
else block is entered when necessary.

Differential Revision: https://reviews.llvm.org/D116442

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e141179fb5c87..1dfc51b1358dd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9631,14 +9631,12 @@ static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
   MVT CastVT;
   if (getScaledOffsetDup(V, Lane, CastVT)) {
     V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0));
-  } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+  } else if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+             V.getOperand(0).getValueType().is128BitVector()) {
     // The lane is incremented by the index of the extract.
     // Example: dup v2f32 (extract v4f32 X, 2), 1 --> dup v4f32 X, 3
-    auto VecVT = V.getOperand(0).getValueType();
-    if (VecVT.isFixedLengthVector() && VecVT.getFixedSizeInBits() <= 128) {
-      Lane += V.getConstantOperandVal(1);
-      V = V.getOperand(0);
-    }
+    Lane += V.getConstantOperandVal(1);
+    V = V.getOperand(0);
   } else if (V.getOpcode() == ISD::CONCAT_VECTORS) {
     // The lane is decremented if we are splatting from the 2nd operand.
     // Example: dup v4i32 (concat v2i32 X, v2i32 Y), 3 --> dup v4i32 Y, 1

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
index c4a25349a1c24..3090dc6edda54 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-limit-duplane.ll
@@ -26,3 +26,27 @@ entry:
   store <16 x i32> %2, <16 x i32>* %arg1, align 256
   ret <4 x i32> %shvec
 }
+
+define <2 x i32> @test2(<16 x i32>* %arg1, <16 x i32>* %arg2) {
+; CHECK-LABEL: test2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT:    mov z0.d, z1.d
+; CHECK-NEXT:    add z2.s, p0/m, z2.s, z2.s
+; CHECK-NEXT:    ext z0.b, z0.b, z1.b, #24
+; CHECK-NEXT:    add z1.s, p0/m, z1.s, z1.s
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT:    st1w { z2.s }, p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <16 x i32>, <16 x i32>* %arg1, align 256
+  %1 = load <16 x i32>, <16 x i32>* %arg2, align 256
+  %shvec = shufflevector <16 x i32> %0, <16 x i32> %1, <2 x i32> <i32 14, i32 14>
+  %2 = add <16 x i32> %0, %0
+  store <16 x i32> %2, <16 x i32>* %arg1, align 256
+  ret <2 x i32> %shvec
+}


        


More information about the llvm-commits mailing list