[llvm] db40a74 - [SVE] Lower fixed length ISD::VECREDUCE_ADD to Scalable
Cameron McInally via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 23 07:08:23 PDT 2020
Author: Cameron McInally
Date: 2020-09-23T09:08:07-05:00
New Revision: db40a74344292410aa3e08c42834423013c4f192
URL: https://github.com/llvm/llvm-project/commit/db40a74344292410aa3e08c42834423013c4f192
DIFF: https://github.com/llvm/llvm-project/commit/db40a74344292410aa3e08c42834423013c4f192.diff
LOG: [SVE] Lower fixed length ISD::VECREDUCE_ADD to Scalable
Differential Revision: https://reviews.llvm.org/D87796
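For context, here is a minimal sketch of what the new lowering covers, based on the added test: with SVE enabled and a register size of at least 256 bits (for example -aarch64-sve-vector-bits-min=256), a fixed-length add reduction such as the one below is now lowered to a predicated SVE reduction instead of being split across NEON operations:

  define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
    %op = load <32 x i8>, <32 x i8>* %a
    %res = call i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8> %op)
    ret i8 %res
  }

  ; Roughly the code generation the test expects (register numbers are
  ; illustrative, not fixed by the CHECK lines):
  ;   ptrue p0.b, vl32
  ;   ld1b  { z0.b }, p0/z, [x0]
  ;   uaddv d0, p0, z0.b
  ;   fmov  x0, d0
  ;   ret

Because UADDV_PRED always produces an i64, the lowering extracts element zero of the reduction as an i64 and truncates it back to the original scalar type (i8 here) when the two differ.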
Added:
llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d7563d80a29e..3b772bd1ab38 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1213,6 +1213,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::UDIV, VT, Custom);
   setOperationAction(ISD::UMAX, VT, Custom);
   setOperationAction(ISD::UMIN, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
   setOperationAction(ISD::VSELECT, VT, Custom);
   setOperationAction(ISD::XOR, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
@@ -9623,9 +9624,13 @@ static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
                                               SelectionDAG &DAG) const {
+  SDValue VecOp = Op.getOperand(0);
+
   SDLoc dl(Op);
   switch (Op.getOpcode()) {
   case ISD::VECREDUCE_ADD:
+    if (useSVEForFixedLengthVectorVT(VecOp.getValueType()))
+      return LowerFixedLengthReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
   case ISD::VECREDUCE_SMAX:
     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
@@ -15952,6 +15957,32 @@ SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
   return convertFromScalableVector(DAG, VT, ScalableRes);
 }
 
+SDValue AArch64TargetLowering::LowerFixedLengthReductionToSVE(unsigned Opcode,
+    SDValue ScalarOp, SelectionDAG &DAG) const {
+  SDLoc DL(ScalarOp);
+  SDValue VecOp = ScalarOp.getOperand(0);
+  EVT SrcVT = VecOp.getValueType();
+
+  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
+  VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
+
+  // UADDV always returns an i64 result.
+  EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
+              SrcVT.getVectorElementType();
+
+  SDValue Rdx = DAG.getNode(Opcode, DL, getPackedSVEVectorVT(ResVT), Pg, VecOp);
+  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
+                            Rdx, DAG.getConstant(0, DL, MVT::i64));
+
+  // This is needed for UADDV, since it returns an i64 result. The VECREDUCE
+  // nodes expect an element-sized result.
+  if (ResVT != ScalarOp.getValueType())
+    Res = DAG.getNode(ISD::TRUNCATE, DL, ScalarOp.getValueType(), Res);
+
+  return Res;
+}
+
 SDValue
 AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
                                                          SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 224eb904e5f0..eac633cd980e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -927,6 +927,8 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthReductionToSVE(unsigned Opcode, SDValue ScalarOp,
+                                         SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
new file mode 100644
index 000000000000..a6557e03e63e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
@@ -0,0 +1,319 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=384 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE when its registers are no bigger than NEON.
+; NO_SVE-NOT: ptrue
+
+;
+; UADDV
+;
+
+; Don't use SVE for 64-bit vectors.
+define i8 @uaddv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v8i8:
+; CHECK: addv b0, v0.8b
+; CHECK: ret
+ %res = call i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8> %a)
+ ret i8 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v16i8:
+; CHECK: addv b0, v0.16b
+; CHECK: ret
+ %res = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %a)
+ ret i8 %res
+}
+
+define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i8:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_GE_256-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+ %op = load <32 x i8>, <32 x i8>* %a
+ %res = call i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8> %op)
+ ret i8 %res
+}
+
+define i8 @uaddv_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
+; VBITS_GE_512-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+ %op = load <64 x i8>, <64 x i8>* %a
+ %res = call i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8> %op)
+ ret i8 %res
+}
+
+define i8 @uaddv_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
+; VBITS_GE_1024-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+ %op = load <128 x i8>, <128 x i8>* %a
+ %res = call i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8> %op)
+ ret i8 %res
+}
+
+define i8 @uaddv_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
+; VBITS_GE_2048-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+ %op = load <256 x i8>, <256 x i8>* %a
+ %res = call i8 @llvm.experimental.vector.reduce.add.v256i8(<256 x i8> %op)
+ ret i8 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i16 @uaddv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: uaddv_v4i16:
+; CHECK: addv h0, v0.4h
+; CHECK: ret
+ %res = call i16 @llvm.experimental.vector.reduce.add.v4i16(<4 x i16> %a)
+ ret i16 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i16 @uaddv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: uaddv_v8i16:
+; CHECK: addv h0, v0.8h
+; CHECK: ret
+ %res = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %a)
+ ret i16 %res
+}
+
+define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: uaddv_v16i16:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_GE_256-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+ %op = load <16 x i16>, <16 x i16>* %a
+ %res = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %op)
+ ret i16 %res
+}
+
+define i16 @uaddv_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+ %op = load <32 x i16>, <32 x i16>* %a
+ %res = call i16 @llvm.experimental.vector.reduce.add.v32i16(<32 x i16> %op)
+ ret i16 %res
+}
+
+define i16 @uaddv_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: uaddv_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+ %op = load <64 x i16>, <64 x i16>* %a
+ %res = call i16 @llvm.experimental.vector.reduce.add.v64i16(<64 x i16> %op)
+ ret i16 %res
+}
+
+define i16 @uaddv_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: uaddv_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+ %op = load <128 x i16>, <128 x i16>* %a
+ %res = call i16 @llvm.experimental.vector.reduce.add.v128i16(<128 x i16> %op)
+ ret i16 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i32 @uaddv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: uaddv_v2i32:
+; CHECK: addp v0.2s, v0.2s
+; CHECK: ret
+ %res = call i32 @llvm.experimental.vector.reduce.add.v2i32(<2 x i32> %a)
+ ret i32 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i32 @uaddv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: uaddv_v4i32:
+; CHECK: addv s0, v0.4s
+; CHECK: ret
+ %res = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %a)
+ ret i32 %res
+}
+
+define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: uaddv_v8i32:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_GE_256-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+ %op = load <8 x i32>, <8 x i32>* %a
+ %res = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %op)
+ ret i32 %res
+}
+
+define i32 @uaddv_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: uaddv_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+ %op = load <16 x i32>, <16 x i32>* %a
+ %res = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %op)
+ ret i32 %res
+}
+
+define i32 @uaddv_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+ %op = load <32 x i32>, <32 x i32>* %a
+ %res = call i32 @llvm.experimental.vector.reduce.add.v32i32(<32 x i32> %op)
+ ret i32 %res
+}
+
+define i32 @uaddv_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: uaddv_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+ %op = load <64 x i32>, <64 x i32>* %a
+ %res = call i32 @llvm.experimental.vector.reduce.add.v64i32(<64 x i32> %op)
+ ret i32 %res
+}
+
+; Nothing to do for 64-bit vectors.
+define i64 @uaddv_v1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: uaddv_v1i64:
+; CHECK: fmov x0, d0
+; CHECK: ret
+ %res = call i64 @llvm.experimental.vector.reduce.add.v1i64(<1 x i64> %a)
+ ret i64 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i64 @uaddv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: uaddv_v2i64:
+; CHECK: addp d0, v0.2d
+; CHECK: ret
+ %res = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %a)
+ ret i64 %res
+}
+
+define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: uaddv_v4i64:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_GE_256-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+ %op = load <4 x i64>, <4 x i64>* %a
+ %res = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %op)
+ ret i64 %res
+}
+
+define i64 @uaddv_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: uaddv_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+ %op = load <8 x i64>, <8 x i64>* %a
+ %res = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %op)
+ ret i64 %res
+}
+
+define i64 @uaddv_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: uaddv_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+ %op = load <16 x i64>, <16 x i64>* %a
+ %res = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %op)
+ ret i64 %res
+}
+
+define i64 @uaddv_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+ %op = load <32 x i64>, <32 x i64>* %a
+ %res = call i64 @llvm.experimental.vector.reduce.add.v32i64(<32 x i64> %op)
+ ret i64 %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v256i8(<256 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.add.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v128i16(<128 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.add.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v64i32(<64 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.add.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v32i64(<32 x i64>)