[llvm] 50598f0 - [DAGCombiner][SVE] Add support for illegal extending masked loads
David Sherwood via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 5 03:46:26 PDT 2023
Author: David Sherwood
Date: 2023-09-05T10:41:21Z
New Revision: 50598f0ff44f3a4e75706f8c53f3380fe7faa896
URL: https://github.com/llvm/llvm-project/commit/50598f0ff44f3a4e75706f8c53f3380fe7faa896
DIFF: https://github.com/llvm/llvm-project/commit/50598f0ff44f3a4e75706f8c53f3380fe7faa896.diff
LOG: [DAGCombiner][SVE] Add support for illegal extending masked loads
In some cases where the same mask is used for multiple
extending masked loads, it can be more efficient to fold
the zero- or sign-extend into the load even when the
extending load is not a legal or custom operation. The
extending load is then split into smaller parts by the
legaliser, which also requires splitting the mask. For SVE
at least this improves the performance of the SPEC x264
benchmark slightly on neoverse-v1 (~0.3%), and at least one
other benchmark improves by around 30%. The uplift for SVE
appears to come from removing the dependencies (vector
unpacks) introduced between the loads and the vector
operations, which increases the available parallelism.
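For illustration, the kind of IR that now benefits looks roughly like the
masked_sload_x2_4i8_4i64 test below (a minimal sketch: the masked load calls
are copied from the test, while the sext/add tail is reconstructed from the
checked output, so treat it as illustrative rather than a verbatim copy):

define <vscale x 4 x i64> @sketch(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
  ; Two masked loads share the same predicate %c ...
  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
  ; ... and both results are sign-extended before use. Folding the sexts
  ; into the loads means the legaliser splits the predicate once
  ; (punpklo/punpkhi) instead of unpacking each loaded vector
  ; (sunpklo/sunpkhi) after every load.
  %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
  %bext = sext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
  %res = add <vscale x 4 x i64> %aext, %bext
  ret <vscale x 4 x i64> %res
}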
See tests:
CodeGen/AArch64/sve-masked-ldst-sext.ll
CodeGen/AArch64/sve-masked-ldst-zext.ll
https://reviews.llvm.org/D159191
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index df2d7fe654e44ee..d2df87b6f85a236 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12926,11 +12926,10 @@ static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
-static SDValue tryToFoldExtOfMaskedLoad(SelectionDAG &DAG,
- const TargetLowering &TLI, EVT VT,
- SDNode *N, SDValue N0,
- ISD::LoadExtType ExtLoadType,
- ISD::NodeType ExtOpc) {
+static SDValue
+tryToFoldExtOfMaskedLoad(SelectionDAG &DAG, const TargetLowering &TLI, EVT VT,
+ bool LegalOperations, SDNode *N, SDValue N0,
+ ISD::LoadExtType ExtLoadType, ISD::NodeType ExtOpc) {
if (!N0.hasOneUse())
return SDValue();
@@ -12938,7 +12937,8 @@ static SDValue tryToFoldExtOfMaskedLoad(SelectionDAG &DAG,
if (!Ld || Ld->getExtensionType() != ISD::NON_EXTLOAD)
return SDValue();
- if (!TLI.isLoadExtLegalOrCustom(ExtLoadType, VT, Ld->getValueType(0)))
+ if ((LegalOperations || !cast<MaskedLoadSDNode>(N0)->isSimple()) &&
+ !TLI.isLoadExtLegalOrCustom(ExtLoadType, VT, Ld->getValueType(0)))
return SDValue();
if (!TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
@@ -13211,8 +13211,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
return foldedExt;
if (SDValue foldedExt =
- tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::SEXTLOAD,
- ISD::SIGN_EXTEND))
+ tryToFoldExtOfMaskedLoad(DAG, TLI, VT, LegalOperations, N, N0,
+ ISD::SEXTLOAD, ISD::SIGN_EXTEND))
return foldedExt;
// fold (sext (load x)) to multiple smaller sextloads.
@@ -13490,8 +13490,8 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
return foldedExt;
if (SDValue foldedExt =
- tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::ZEXTLOAD,
- ISD::ZERO_EXTEND))
+ tryToFoldExtOfMaskedLoad(DAG, TLI, VT, LegalOperations, N, N0,
+ ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
return foldedExt;
// fold (zext (load x)) to multiple smaller zextloads.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 16fe5bc2786318c..1e209839f45e788 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5351,8 +5351,33 @@ bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
}
bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
- return ExtVal.getValueType().isScalableVector() ||
- Subtarget->useSVEForFixedLengthVectors();
+ EVT ExtVT = ExtVal.getValueType();
+ if (!ExtVT.isScalableVector() && !Subtarget->useSVEForFixedLengthVectors())
+ return false;
+
+ // It may be worth creating extending masked loads if there are multiple
+ // masked loads using the same predicate. That way we'll end up creating
+ // extending masked loads that may then get split by the legaliser. This
+ // results in just one set of predicate unpacks at the start, instead of
+ // multiple sets of vector unpacks after each load.
+ if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal->getOperand(0))) {
+ if (!isLoadExtLegalOrCustom(ISD::ZEXTLOAD, ExtVT, Ld->getValueType(0))) {
+ // Disable extending masked loads for fixed-width for now, since the code
+ // quality doesn't look great.
+ if (!ExtVT.isScalableVector())
+ return false;
+
+ unsigned NumExtMaskedLoads = 0;
+ for (auto *U : Ld->getMask()->uses())
+ if (isa<MaskedLoadSDNode>(U))
+ NumExtMaskedLoads++;
+
+ if (NumExtMaskedLoads <= 1)
+ return false;
+ }
+ }
+
+ return true;
}
unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
index f8587ca86f392ab..e266ab78603fb0b 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -170,14 +170,14 @@ define <vscale x 8 x i64> @masked_sload_8i8_8i64(ptr %a, <vscale x 8 x i1> %b) {
define <vscale x 4 x i64> @masked_sload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_sload_x2_4i8_4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1sb { z1.s }, p0/z, [x1]
-; CHECK-NEXT: sunpkhi z2.d, z0.s
-; CHECK-NEXT: sunpklo z0.d, z0.s
-; CHECK-NEXT: sunpkhi z3.d, z1.s
-; CHECK-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEXT: add z0.d, z0.d, z1.d
-; CHECK-NEXT: add z1.d, z2.d, z3.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z1.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sb { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: ret
%aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
%bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
@@ -190,14 +190,14 @@ define <vscale x 4 x i64> @masked_sload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
define <vscale x 4 x i64> @masked_sload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_sload_x2_4i16_4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x1]
-; CHECK-NEXT: sunpkhi z2.d, z0.s
-; CHECK-NEXT: sunpklo z0.d, z0.s
-; CHECK-NEXT: sunpkhi z3.d, z1.s
-; CHECK-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEXT: add z0.d, z0.d, z1.d
-; CHECK-NEXT: add z1.d, z2.d, z3.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z1.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sh { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: ret
%aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
%bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
@@ -210,14 +210,14 @@ define <vscale x 4 x i64> @masked_sload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
define <vscale x 8 x i32> @masked_sload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_sload_x2_8i8_8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x1]
-; CHECK-NEXT: sunpkhi z2.s, z0.h
-; CHECK-NEXT: sunpklo z0.s, z0.h
-; CHECK-NEXT: sunpkhi z3.s, z1.h
-; CHECK-NEXT: sunpklo z1.s, z1.h
-; CHECK-NEXT: add z0.s, z0.s, z1.s
-; CHECK-NEXT: add z1.s, z2.s, z3.s
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z1.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z2.s }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sb { z3.s }, p0/z, [x1]
+; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: add z1.s, z1.s, z2.s
; CHECK-NEXT: ret
%aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
%bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
@@ -230,24 +230,24 @@ define <vscale x 8 x i32> @masked_sload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
define <vscale x 8 x i64> @masked_sload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_sload_x2_8i8_8i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x1]
-; CHECK-NEXT: sunpkhi z2.s, z0.h
-; CHECK-NEXT: sunpklo z0.s, z0.h
-; CHECK-NEXT: sunpklo z3.s, z1.h
-; CHECK-NEXT: sunpkhi z1.s, z1.h
-; CHECK-NEXT: sunpkhi z4.d, z2.s
-; CHECK-NEXT: sunpklo z2.d, z2.s
-; CHECK-NEXT: sunpkhi z5.d, z0.s
-; CHECK-NEXT: sunpklo z0.d, z0.s
-; CHECK-NEXT: sunpklo z6.d, z3.s
-; CHECK-NEXT: sunpkhi z7.d, z1.s
-; CHECK-NEXT: sunpklo z24.d, z1.s
-; CHECK-NEXT: sunpkhi z1.d, z3.s
-; CHECK-NEXT: add z0.d, z0.d, z6.d
-; CHECK-NEXT: add z3.d, z4.d, z7.d
-; CHECK-NEXT: add z1.d, z5.d, z1.d
-; CHECK-NEXT: add z2.d, z2.d, z24.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: punpkhi p2.h, p1.b
+; CHECK-NEXT: punpklo p1.h, p1.b
+; CHECK-NEXT: punpkhi p3.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z3.d }, p2/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1sb { z2.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1sb { z1.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z4.d }, p2/z, [x1, #3, mul vl]
+; CHECK-NEXT: ld1sb { z5.d }, p1/z, [x1, #2, mul vl]
+; CHECK-NEXT: ld1sb { z6.d }, p3/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1sb { z7.d }, p0/z, [x1]
+; CHECK-NEXT: add z2.d, z2.d, z5.d
+; CHECK-NEXT: add z3.d, z3.d, z4.d
+; CHECK-NEXT: add z0.d, z0.d, z7.d
+; CHECK-NEXT: add z1.d, z1.d, z6.d
; CHECK-NEXT: ret
%aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
%bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index a86dd18d7fd7c25..81863e7217d3a1a 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -165,14 +165,14 @@ define <vscale x 8 x i64> @masked_zload_8i8_8i64(ptr %a, <vscale x 8 x i1> %b) {
define <vscale x 4 x i64> @masked_zload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_4i8_4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.s }, p0/z, [x1]
-; CHECK-NEXT: uunpkhi z2.d, z0.s
-; CHECK-NEXT: uunpklo z0.d, z0.s
-; CHECK-NEXT: uunpkhi z3.d, z1.s
-; CHECK-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEXT: add z0.d, z0.d, z1.d
-; CHECK-NEXT: add z1.d, z2.d, z3.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z1.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1b { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: ret
%aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
%bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
@@ -185,14 +185,14 @@ define <vscale x 4 x i64> @masked_zload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
define <vscale x 4 x i64> @masked_zload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_4i16_4i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
-; CHECK-NEXT: ld1h { z1.s }, p0/z, [x1]
-; CHECK-NEXT: uunpkhi z2.d, z0.s
-; CHECK-NEXT: uunpklo z0.d, z0.s
-; CHECK-NEXT: uunpkhi z3.d, z1.s
-; CHECK-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEXT: add z0.d, z0.d, z1.d
-; CHECK-NEXT: add z1.d, z2.d, z3.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z1.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z2.d }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x1]
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z1.d, z1.d, z2.d
; CHECK-NEXT: ret
%aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
%bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
@@ -205,14 +205,14 @@ define <vscale x 4 x i64> @masked_zload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
define <vscale x 8 x i32> @masked_zload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_8i8_8i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.h }, p0/z, [x1]
-; CHECK-NEXT: uunpkhi z2.s, z0.h
-; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: uunpkhi z3.s, z1.h
-; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: add z0.s, z0.s, z1.s
-; CHECK-NEXT: add z1.s, z2.s, z3.s
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z2.s }, p1/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1b { z3.s }, p0/z, [x1]
+; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: add z1.s, z1.s, z2.s
; CHECK-NEXT: ret
%aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
%bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
@@ -225,24 +225,24 @@ define <vscale x 8 x i32> @masked_zload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
define <vscale x 8 x i64> @masked_zload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8 x i1> %c) {
; CHECK-LABEL: masked_zload_x2_8i8_8i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
-; CHECK-NEXT: ld1b { z1.h }, p0/z, [x1]
-; CHECK-NEXT: uunpkhi z2.s, z0.h
-; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: uunpklo z3.s, z1.h
-; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: uunpkhi z4.d, z2.s
-; CHECK-NEXT: uunpklo z2.d, z2.s
-; CHECK-NEXT: uunpkhi z5.d, z0.s
-; CHECK-NEXT: uunpklo z0.d, z0.s
-; CHECK-NEXT: uunpklo z6.d, z3.s
-; CHECK-NEXT: uunpkhi z7.d, z1.s
-; CHECK-NEXT: uunpklo z24.d, z1.s
-; CHECK-NEXT: uunpkhi z1.d, z3.s
-; CHECK-NEXT: add z0.d, z0.d, z6.d
-; CHECK-NEXT: add z3.d, z4.d, z7.d
-; CHECK-NEXT: add z1.d, z5.d, z1.d
-; CHECK-NEXT: add z2.d, z2.d, z24.d
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: punpkhi p2.h, p1.b
+; CHECK-NEXT: punpklo p1.h, p1.b
+; CHECK-NEXT: punpkhi p3.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z3.d }, p2/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1b { z2.d }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.d }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z4.d }, p2/z, [x1, #3, mul vl]
+; CHECK-NEXT: ld1b { z5.d }, p1/z, [x1, #2, mul vl]
+; CHECK-NEXT: ld1b { z6.d }, p3/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1b { z7.d }, p0/z, [x1]
+; CHECK-NEXT: add z2.d, z2.d, z5.d
+; CHECK-NEXT: add z3.d, z3.d, z4.d
+; CHECK-NEXT: add z0.d, z0.d, z7.d
+; CHECK-NEXT: add z1.d, z1.d, z6.d
; CHECK-NEXT: ret
%aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
%bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)