[PATCH] D109666: [AArch64][SVE][InstCombine] Fold redundant zip(unzip) operations
Usman Nadeem via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 12 15:51:32 PDT 2021
mnadeem created this revision.
mnadeem added reviewers: david-arm, sdesmalen, paulwalker-arm.
Herald added subscribers: psnobl, hiraditya, kristof.beyls, tschuett.
Herald added a reviewer: efriedma.
mnadeem requested review of this revision.
Herald added a project: LLVM.
Herald added a subscriber: llvm-commits.
uzp1/uzp2 extract the even and odd elements of A and B, and zip1/zip2
re-interleave the low and high halves of their operands, so zipping the
unzipped values simply reconstructs the original vectors:

zip1(uzp1(A, B), uzp2(A, B)) --> A
zip2(uzp1(A, B), uzp2(A, B)) --> B
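
As a sanity check of the element-level semantics (not part of the patch), here
is a minimal stand-alone C++ sketch that models uzp1/uzp2/zip1/zip2 on a fixed
4-element vector and verifies the round-trip identity. The helper names mirror
the intrinsics but are otherwise hypothetical:

// Stand-alone model of the SVE UZP/ZIP element semantics on a 4-element
// vector, used only to check zip1(uzp1(A,B), uzp2(A,B)) == A and
// zip2(uzp1(A,B), uzp2(A,B)) == B.
#include <array>
#include <cassert>
#include <cstdint>

using Vec = std::array<uint32_t, 4>;

// uzp1/uzp2: even/odd elements of the concatenation (A, B).
static Vec uzp1(Vec A, Vec B) { return {A[0], A[2], B[0], B[2]}; }
static Vec uzp2(Vec A, Vec B) { return {A[1], A[3], B[1], B[3]}; }

// zip1/zip2: interleave the low/high halves of X and Y.
static Vec zip1(Vec X, Vec Y) { return {X[0], Y[0], X[1], Y[1]}; }
static Vec zip2(Vec X, Vec Y) { return {X[2], Y[2], X[3], Y[3]}; }

int main() {
  Vec A = {0, 1, 2, 3}, B = {4, 5, 6, 7};
  Vec U1 = uzp1(A, B), U2 = uzp2(A, B);
  assert(zip1(U1, U2) == A); // zip1(uzp1(A,B), uzp2(A,B)) --> A
  assert(zip2(U1, U2) == B); // zip2(uzp1(A,B), uzp2(A,B)) --> B
  return 0;
}

The same reasoning holds for any element count, since the unzip/zip pair is a
permutation followed by its inverse.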
Repository:
rG LLVM Github Monorepo
https://reviews.llvm.org/D109666
Files:
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-zip.ll
Index: llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-zip.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-zip.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 4 x i32> @redundant_zip_unzip(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
+; CHECK-LABEL: @redundant_zip_unzip(
+; CHECK-NEXT: [[RET:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[RET]]
+;
+ %uzp1 = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+ %uzp2 = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+ %zip1 = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1.nxv4i32(<vscale x 4 x i32> %uzp1, <vscale x 4 x i32> %uzp2)
+ %zip2 = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2.nxv4i32(<vscale x 4 x i32> %uzp1, <vscale x 4 x i32> %uzp2)
+ %ret = add <vscale x 4 x i32> %zip1, %zip2
+ ret <vscale x 4 x i32> %ret
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.zip1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.zip2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+attributes #0 = { "target-features"="+sve" }
Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -792,6 +792,22 @@
return IC.replaceInstUsesWith(II, VectorSplat);
}
+static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
+ IntrinsicInst &II) {
+ // zip1(uzp1(A, B), uzp2(A, B)) --> A
+ // zip2(uzp1(A, B), uzp2(A, B)) --> B
+ Value *Uzp1Op0, *Uzp1Op1;
+ if (match(II.getArgOperand(0), m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(
+ m_Value(Uzp1Op0), m_Value(Uzp1Op1))) &&
+ match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
+ m_Specific(Uzp1Op0), m_Specific(Uzp1Op1))))
+ return IC.replaceInstUsesWith(
+ II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? Uzp1Op0
+ : Uzp1Op1));
+
+ return None;
+}
+
Optional<Instruction *>
AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) const {
@@ -835,6 +851,9 @@
case Intrinsic::aarch64_sve_sunpkhi:
case Intrinsic::aarch64_sve_sunpklo:
return instCombineSVEUnpack(IC, II);
+ case Intrinsic::aarch64_sve_zip1:
+ case Intrinsic::aarch64_sve_zip2:
+ return instCombineSVEZip(IC, II);
}
return None;