[llvm] 9f37980 - [AArch64][SVE] Fold insert(zero, extract(X, 0), 0) -> X, when X is known to zero lanes 1-N

Bradley Smith via llvm-commits llvm-commits at lists.llvm.org
Tue May 4 07:05:26 PDT 2021


Author: Bradley Smith
Date: 2021-05-04T15:05:05+01:00
New Revision: 9f37980d45c7ea05c16851b1d672ab41907a38b7

URL: https://github.com/llvm/llvm-project/commit/9f37980d45c7ea05c16851b1d672ab41907a38b7
DIFF: https://github.com/llvm/llvm-project/commit/9f37980d45c7ea05c16851b1d672ab41907a38b7.diff

LOG: [AArch64][SVE] Fold insert(zero, extract(X, 0), 0) -> X, when X is known to zero lanes 1-N

Specifically, this allows us to rely on the lane-zeroing behaviour of
SVE reduction instructions.

Co-authored-by: Paul Walker <paul.walker at arm.com>

Differential Revision: https://reviews.llvm.org/D101369
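
A minimal illustration of the fold, drawn from the andv_zero_fill case in the
new test file below (assuming SVE codegen as exercised by these tests): the
insertelement into a zeroinitializer vector is removed and the reduction's
destination register is used directly, since the SVE reduction already leaves
lanes 1-N of that register zeroed. IR of this shape:

    %t1 = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
    %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
    ret <vscale x 16 x i8> %t2

now compiles to just:

    andv b0, p0, z0.b
    ret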

Added: 
    llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c8daad66b5864..42c1deaca77fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15901,6 +15901,75 @@ static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops);
 }
 
+// Return true if the vector operation can guarantee only the first lane of its
+// result contains data, with all bits in other lanes set to zero.
+static bool isLanes1toNKnownZero(SDValue Op) {
+  switch (Op.getOpcode()) {
+  default:
+    return false;
+  case AArch64ISD::ANDV_PRED:
+  case AArch64ISD::EORV_PRED:
+  case AArch64ISD::FADDA_PRED:
+  case AArch64ISD::FADDV_PRED:
+  case AArch64ISD::FMAXNMV_PRED:
+  case AArch64ISD::FMAXV_PRED:
+  case AArch64ISD::FMINNMV_PRED:
+  case AArch64ISD::FMINV_PRED:
+  case AArch64ISD::ORV_PRED:
+  case AArch64ISD::SADDV_PRED:
+  case AArch64ISD::SMAXV_PRED:
+  case AArch64ISD::SMINV_PRED:
+  case AArch64ISD::UADDV_PRED:
+  case AArch64ISD::UMAXV_PRED:
+  case AArch64ISD::UMINV_PRED:
+    return true;
+  }
+}
+
+static SDValue removeRedundantInsertVectorElt(SDNode *N) {
+  assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!");
+  SDValue InsertVec = N->getOperand(0);
+  SDValue InsertElt = N->getOperand(1);
+  SDValue InsertIdx = N->getOperand(2);
+
+  // We only care about inserts into the first element...
+  if (!isNullConstant(InsertIdx))
+    return SDValue();
+  // ...of a zero'd vector...
+  if (!ISD::isConstantSplatVectorAllZeros(InsertVec.getNode()))
+    return SDValue();
+  // ...where the inserted data was previously extracted...
+  if (InsertElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+    return SDValue();
+
+  SDValue ExtractVec = InsertElt.getOperand(0);
+  SDValue ExtractIdx = InsertElt.getOperand(1);
+
+  // ...from the first element of a vector.
+  if (!isNullConstant(ExtractIdx))
+    return SDValue();
+
+  // If we get here we are effectively trying to zero lanes 1-N of a vector.
+
+  // Ensure there's no type conversion going on.
+  if (N->getValueType(0) != ExtractVec.getValueType())
+    return SDValue();
+
+  if (!isLanes1toNKnownZero(ExtractVec))
+    return SDValue();
+
+  // The explicit zeroing is redundant.
+  return ExtractVec;
+}
+
+static SDValue
+performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+  if (SDValue Res = removeRedundantInsertVectorElt(N))
+    return Res;
+
+  return performPostLD1Combine(N, DCI, true);
+}
+
 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -15982,7 +16051,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case AArch64ISD::GLD1S_IMM_MERGE_ZERO:
     return performGLD1Combine(N, DAG);
   case ISD::INSERT_VECTOR_ELT:
-    return performPostLD1Combine(N, DCI, true);
+    return performInsertVectorEltCombine(N, DCI);
   case ISD::EXTRACT_VECTOR_ELT:
     return performExtractVectorEltCombine(N, DAG);
   case ISD::VECREDUCE_ADD:

diff --git a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
new file mode 100644
index 0000000000000..8f0356b37bfe9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
@@ -0,0 +1,239 @@
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 16 x i8> @andv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: andv_zero_fill:
+; CHECK: andv b0, p0, z0.b
+; CHECK-NEXT: ret
+  %t1 = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
+  %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
+  ret <vscale x 16 x i8> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 8 x i16> @eorv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: eorv_zero_fill:
+; CHECK: eorv h0, p0, z0.h
+; CHECK-NEXT: ret
+  %t1 = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
+  %t2 = insertelement <vscale x 8 x i16> zeroinitializer, i16 %t1, i64 0
+  ret <vscale x 8 x i16> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x double> @fadda_zero_fill(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a) #0 {
+; CHECK-LABEL: fadda_zero_fill:
+; CHECK: fadda d0, p0, d0, z1.d
+; CHECK-NEXT: ret
+  %t1 = call double @llvm.aarch64.sve.fadda.nxv2f64(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a)
+  %t2 = insertelement <vscale x 2 x double> zeroinitializer, double %t1, i64 0
+  ret <vscale x 2 x double> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 4 x float> @faddv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
+; CHECK-LABEL: faddv_zero_fill:
+; CHECK: faddv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call float @llvm.aarch64.sve.faddv.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a)
+  %t2 = insertelement <vscale x 4 x float> zeroinitializer, float %t1, i64 0
+  ret <vscale x 4 x float> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 8 x half> @fmaxv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
+; CHECK-LABEL: fmaxv_zero_fill:
+; CHECK: fmaxv h0, p0, z0.h
+; CHECK-NEXT: ret
+  %t1 = call half @llvm.aarch64.sve.fmaxv.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a)
+  %t2 = insertelement <vscale x 8 x half> zeroinitializer, half %t1, i64 0
+  ret <vscale x 8 x half> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x float> @fmaxnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
+; CHECK-LABEL: fmaxnmv_zero_fill:
+; CHECK: fmaxnmv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call float @llvm.aarch64.sve.fmaxnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
+  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
+  ret <vscale x 2 x float> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x float> @fminnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
+; CHECK-LABEL: fminnmv_zero_fill:
+; CHECK: fminnmv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call float @llvm.aarch64.sve.fminnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
+  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
+  ret <vscale x 2 x float> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x float> @fminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
+; CHECK-LABEL: fminv_zero_fill:
+; CHECK: fminv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call float @llvm.aarch64.sve.fminv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
+  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
+  ret <vscale x 2 x float> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 4 x i32> @orv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
+; CHECK-LABEL: orv_zero_fill:
+; CHECK: orv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
+  %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
+  ret <vscale x 4 x i32> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x i64> @saddv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: saddv_zero_fill:
+; CHECK: saddv d0, p0, z0.b
+; CHECK-NEXT: ret
+  %t1 = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
+  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
+  ret <vscale x 2 x i64> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x i64> @smaxv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: smaxv_zero_fill:
+; CHECK: smaxv d0, p0, z0.d
+; CHECK-NEXT: ret
+  %t1 = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
+  ret <vscale x 2 x i64> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 4 x i32> @sminv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
+; CHECK-LABEL: sminv_zero_fill:
+; CHECK: sminv s0, p0, z0.s
+; CHECK-NEXT: ret
+  %t1 = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
+  %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
+  ret <vscale x 4 x i32> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x i64> @uaddv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: uaddv_zero_fill:
+; CHECK: uaddv d0, p0, z0.h
+; CHECK-NEXT: ret
+  %t1 = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
+  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
+  ret <vscale x 2 x i64> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 16 x i8> @umaxv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: umaxv_zero_fill:
+; CHECK: umaxv b0, p0, z0.b
+; CHECK-NEXT: ret
+  %t1 = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
+  %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
+  ret <vscale x 16 x i8> %t2
+}
+
+; Ensure we rely on the reduction's implicit zero filling.
+define <vscale x 2 x i64> @uminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: uminv_zero_fill:
+; CHECK: uminv d0, p0, z0.d
+; CHECK-NEXT: ret
+  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
+  ret <vscale x 2 x i64> %t2
+}
+
+; Ensure explicit zeroing when inserting into a lane other than 0.
+; NOTE: This test doesn't care about the exact way an insert is code generated,
+; so only checks the presence of one instruction from the expected chain.
+define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: zero_fill_non_zero_index:
+; CHECK: uminv d{{[0-9]+}}, p0, z0.d
+; CHECK: mov z{{[0-9]+}}.d, p{{[0-9]+}}/m, x{{[0-9]+}}
+; CHECK: ret
+  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 1
+  ret <vscale x 2 x i64> %t2
+}
+
+; Ensure explicit zeroing when the result vector is larger than that produced by
+; the reduction instruction.
+define <vscale x 4 x i64> @zero_fill_type_mismatch(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: zero_fill_type_mismatch:
+; CHECK: uminv d0, p0, z0.d
+; CHECK-NEXT: mov z1.d, #0
+; CHECK-NEXT: ret
+  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
+  %t2 = insertelement <vscale x 4 x i64> zeroinitializer, i64 %t1, i64 0
+  ret <vscale x 4 x i64> %t2
+}
+
+; Ensure explicit zeroing when extracting an element from an operation that
+; cannot guarantee lanes 1-N are zero.
+; NOTE: This test doesn't care about the exact way an insert is code generated,
+; so only checks the presence of one instruction from the expected chain.
+define <vscale x 2 x i64> @zero_fill_no_zero_upper_lanes(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: zero_fill_no_zero_upper_lanes:
+; CHECK: umin z{{[0-9]+}}.d, p0/m, z0.d, z0.d
+; CHECK: mov z{{[0-9]+}}.d, p{{[0-9]+}}/m, x{{[0-9]+}}
+; CHECK: ret
+  %t1 = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %a)
+  %t2 = extractelement <vscale x 2 x i64> %t1, i64 0
+  %t3 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t2, i64 0
+  ret <vscale x 2 x i64> %t3
+}
+
+declare i8 @llvm.aarch64.sve.andv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+
+declare i8 @llvm.aarch64.sve.eorv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+
+declare float @llvm.aarch64.sve.fadda.nxv2f32(<vscale x 2 x i1>, float, <vscale x 2 x float>)
+declare double @llvm.aarch64.sve.fadda.nxv2f64(<vscale x 2 x i1>, double, <vscale x 2 x double>)
+
+declare float @llvm.aarch64.sve.faddv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
+declare float @llvm.aarch64.sve.faddv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
+
+declare float @llvm.aarch64.sve.fmaxnmv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
+
+declare half @llvm.aarch64.sve.fmaxv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
+declare float @llvm.aarch64.sve.fmaxv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
+
+declare float @llvm.aarch64.sve.fminv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
+
+declare float @llvm.aarch64.sve.fminnmv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
+
+declare i8 @llvm.aarch64.sve.orv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+
+declare i64 @llvm.aarch64.sve.saddv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+
+declare i8 @llvm.aarch64.sve.smaxv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare i8 @llvm.aarch64.sve.sminv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
+
+declare i64 @llvm.aarch64.sve.uaddv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
+
+declare i8 @llvm.aarch64.sve.umaxv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
+
+declare i8 @llvm.aarch64.sve.uminv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
+declare i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+attributes #0 = { "target-features"="+sve" }
