[llvm] 828b89b - [AArch64][SelectionDAG] Supports unpklo/hi instructions to reduce the number of loads

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Mar 21 08:48:35 PDT 2022


Author: zhongyunde
Date: 2022-03-21T23:47:33+08:00
New Revision: 828b89bc0bb13177c48329ca866fd357e206fbcb

URL: https://github.com/llvm/llvm-project/commit/828b89bc0bb13177c48329ca866fd357e206fbcb
DIFF: https://github.com/llvm/llvm-project/commit/828b89bc0bb13177c48329ca866fd357e206fbcb.diff

LOG: [AArch64][SelectionDAG] Supports unpklo/hi instructions to reduce the number of loads

Try to reduce the number of masked loads in favour of more unpklo/hi
instructions. Both ISD::ZEXTLOAD and ISD::SEXTLOAD extensions from legal
types are supported.
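
For illustration, here is one of the masked-load patterns this change targets,
mirroring the tests added below: a zero-extended masked load of
<vscale x 8 x i16> widened to <vscale x 8 x i64>.

  define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
    %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
    %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
    ret <vscale x 8 x i64> %res
  }
  declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)

Previously the predicate was split and four predicated extending loads were
emitted; with this patch the IR above lowers to a single ld1h followed by a
tree of uunpklo/uunpkhi unpacks:

  ld1h { z0.h }, p0/z, [x0]
  uunpklo z1.s, z0.h
  uunpkhi z3.s, z0.h
  uunpklo z0.d, z1.s
  uunpkhi z1.d, z1.s
  uunpklo z2.d, z3.s
  uunpkhi z3.d, z3.s
  ret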

Test cases for both normal and masked loads are added to guard against
compile crashes.

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D120953

Added: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4cda3092d48d3..708f9c502cd61 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11513,9 +11513,12 @@ static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
                                   bool LegalOperations, SDNode *N, SDValue N0,
                                   ISD::LoadExtType ExtLoadType,
                                   ISD::NodeType ExtOpc) {
+  // TODO: isFixedLengthVector() should be removed, with any negative effects
+  // on code generation being the result of that target's implementation of
+  // isVectorLoadExtDesirable().
   if (!ISD::isNON_EXTLoad(N0.getNode()) ||
       !ISD::isUNINDEXEDLoad(N0.getNode()) ||
-      ((LegalOperations || VT.isVector() ||
+      ((LegalOperations || VT.isFixedLengthVector() ||
         !cast<LoadSDNode>(N0)->isSimple()) &&
        !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
     return {};

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 03cd74cf8b85e..6e107ea873952 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1231,6 +1231,13 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       }
     }
 
+    // SVE supports unpklo/hi instructions to reduce the number of loads.
+    for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
+      setLoadExtAction(Op, MVT::nxv16i64, MVT::nxv16i8, Expand);
+      setLoadExtAction(Op, MVT::nxv8i64, MVT::nxv8i16, Expand);
+      setLoadExtAction(Op, MVT::nxv4i64, MVT::nxv4i32, Expand);
+    }
+
     // SVE supports truncating stores of 64 and 128-bit vectors
     setTruncStoreAction(MVT::v2i64, MVT::v2i8, Custom);
     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Custom);

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
new file mode 100644
index 0000000000000..d4771c5b610ba
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i64> @ld1b_i8_sext(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sb { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1sb { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1sb { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1sb { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1sb { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1sb { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1sb { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 16 x i64> @ld1b_i8_zext(<vscale x 16 x i8> *%base) {
+; CHECK-LABEL: ld1b_i8_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1b { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1b { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1b { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ld1b { z4.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT:    ld1b { z5.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT:    ld1b { z6.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT:    ld1b { z7.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i64> @ld1h_i16_sext(<vscale x 8 x i16> *%base) {
+; CHECK-LABEL: ld1h_i16_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sh { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1sh { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1sh { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %res = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @ld1h_i16_zext(<vscale x 8 x i16> *%base) {
+; CHECK-LABEL: ld1h_i16_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %res = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i64> @ld1w_i32_sext(<vscale x 4 x i32> *%base) {
+; CHECK-LABEL: ld1w_i32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %res = sext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @ld1w_i32_zext(<vscale x 4 x i32> *%base) {
+; CHECK-LABEL: ld1w_i32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT:    ret
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %res = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
new file mode 100644
index 0000000000000..38f2a3b2c829b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    sunpkhi z0.h, z0.b
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpkhi z3.s, z1.h
+; CHECK-NEXT:    sunpklo z5.s, z0.h
+; CHECK-NEXT:    sunpkhi z7.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z2.s
+; CHECK-NEXT:    sunpkhi z1.d, z2.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    sunpklo z4.d, z5.s
+; CHECK-NEXT:    sunpkhi z5.d, z5.s
+; CHECK-NEXT:    sunpklo z6.d, z7.s
+; CHECK-NEXT:    sunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_ld1b_i8_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    uunpkhi z0.h, z0.b
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpkhi z3.s, z1.h
+; CHECK-NEXT:    uunpklo z5.s, z0.h
+; CHECK-NEXT:    uunpkhi z7.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z2.s
+; CHECK-NEXT:    uunpkhi z1.d, z2.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    uunpklo z4.d, z5.s
+; CHECK-NEXT:    uunpkhi z5.d, z5.s
+; CHECK-NEXT:    uunpklo z6.d, z7.s
+; CHECK-NEXT:    uunpkhi z7.d, z7.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
+  ret <vscale x 16 x i64> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    sunpkhi z3.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_ld1h_i16_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z3.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i64> @masked_ld1w_i32_sext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    sunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @masked_ld1w_i32_zext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_ld1w_i32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    ret
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index 69b3b46d9a7ec..66c4798f3059a 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -79,17 +79,14 @@ define <vscale x 2 x i64> @masked_zload_passthru(<vscale x 2 x i32>* %src, <vsca
 ; Return type requires splitting
 define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
-; CHECK:       punpklo p1.h, p0.b
-; CHECK-NEXT:  punpkhi p0.h, p0.b
-; CHECK-NEXT:  punpklo p2.h, p1.b
-; CHECK-NEXT:  punpkhi p1.h, p1.b
-; CHECK-NEXT:  ld1h { z0.d }, p2/z, [x0]
-; CHECK-NEXT:  punpklo p2.h, p0.b
-; CHECK-NEXT:  punpkhi p0.h, p0.b
-; CHECK-NEXT:  ld1h { z1.d }, p1/z, [x0, #1, mul vl]
-; CHECK-NEXT:  ld1h { z2.d }, p2/z, [x0, #2, mul vl]
-; CHECK-NEXT:  ld1h { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT:  ret
+; CHECK:         ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z3.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z2.d, z3.s
+; CHECK-NEXT:    uunpkhi z3.d, z3.s
+; CHECK-NEXT:    ret
   %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ext


        

