[llvm] 974ddb5 - [SVE] Lower fixed length VECREDUCE_XOR operation

Cameron McInally via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 12 08:12:29 PDT 2020


Author: Cameron McInally
Date: 2020-10-12T10:12:15-05:00
New Revision: 974ddb54c9adfb533f4bd9665ef902ebe75fa7ee

URL: https://github.com/llvm/llvm-project/commit/974ddb54c9adfb533f4bd9665ef902ebe75fa7ee
DIFF: https://github.com/llvm/llvm-project/commit/974ddb54c9adfb533f4bd9665ef902ebe75fa7ee.diff

LOG: [SVE] Lower fixed length VECREDUCE_XOR operation

Differential Revision: https://reviews.llvm.org/D88974
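
For context, VECREDUCE_XOR is the SelectionDAG node produced for the
llvm.experimental.vector.reduce.xor.* intrinsics. NEON has no
single-instruction XOR-across-lanes reduction, so this patch routes the
operation through the existing fixed-length SVE reduction path (as is
already done for VECREDUCE_AND and VECREDUCE_OR), using SVE's predicated
EORV instruction. A minimal IR sketch of the operation being lowered
(the function name and vector width here are chosen purely for
illustration):

  define i32 @xor_reduce_example(<4 x i32> %v) {
    %r = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %v)
    ret i32 %r
  }
  declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)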

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c2972ab4f2f7..b1c4903cb5e3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1114,24 +1114,16 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
       setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
       setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v8i8, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v16i8, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v4i16, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v8i16, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v2i32, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v4i32, Custom);
-      setOperationAction(ISD::VECREDUCE_AND, MVT::v2i64, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v8i8, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v16i8, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v4i16, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v8i16, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v2i32, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v4i32, Custom);
-      setOperationAction(ISD::VECREDUCE_OR, MVT::v2i64, Custom);
       setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
       setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
       setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
       setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
+      for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
+                      MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
+        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
+        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
+        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
+      }
     }
   }
 
@@ -1275,6 +1267,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
   setOperationAction(ISD::VSELECT, VT, Custom);
   setOperationAction(ISD::XOR, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
@@ -3953,6 +3946,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::VECREDUCE_ADD:
   case ISD::VECREDUCE_AND:
   case ISD::VECREDUCE_OR:
+  case ISD::VECREDUCE_XOR:
   case ISD::VECREDUCE_SMAX:
   case ISD::VECREDUCE_SMIN:
   case ISD::VECREDUCE_UMAX:
@@ -9742,6 +9736,7 @@ SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
   EVT SrcVT = Src.getValueType();
   bool OverrideNEON = Op.getOpcode() == ISD::VECREDUCE_AND ||
                       Op.getOpcode() == ISD::VECREDUCE_OR ||
+                      Op.getOpcode() == ISD::VECREDUCE_XOR ||
                       (Op.getOpcode() != ISD::VECREDUCE_ADD &&
                        SrcVT.getVectorElementType() == MVT::i64);
   if (useSVEForFixedLengthVectorVT(SrcVT, OverrideNEON)) {
@@ -9760,6 +9755,8 @@ SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
       return LowerFixedLengthReductionToSVE(AArch64ISD::UMAXV_PRED, Op, DAG);
     case ISD::VECREDUCE_UMIN:
       return LowerFixedLengthReductionToSVE(AArch64ISD::UMINV_PRED, Op, DAG);
+    case ISD::VECREDUCE_XOR:
+      return LowerFixedLengthReductionToSVE(AArch64ISD::EORV_PRED, Op, DAG);
     case ISD::VECREDUCE_FMAX:
       return LowerFixedLengthReductionToSVE(AArch64ISD::FMAXNMV_PRED, Op, DAG);
     case ISD::VECREDUCE_FMIN:
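
Taken together, the lowering changes above mark VECREDUCE_XOR as Custom
for NEON-sized and wider fixed-length vector types, dispatch it through
the shared VECREDUCE_* case in LowerOperation, and lower it in
LowerVECREDUCE via LowerFixedLengthReductionToSVE with
AArch64ISD::EORV_PRED. For a NEON-sized input such as <4 x i32>, the
tests below expect code of the following shape (concrete register
numbers are illustrative; the CHECK lines match them with patterns):

  ptrue p0.s, vl4
  eorv  s0, p0, z0.s
  fmov  w0, s0
  ret

That is: build a predicate covering exactly the fixed vector length,
perform the predicated EORV reduction on the SVE register holding the
operand, and move the scalar result to a general-purpose register.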

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
index efab691b4a9f..ffe72b511e0d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll
@@ -343,6 +343,329 @@ define i64 @andv_v32i64(<32 x i64>* %a) #0 {
   ret i64 %res
 }
 
+;
+; EORV
+;
+
+; No single instruction NEON EORV support. Use SVE.
+define i8 @eorv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: eorv_v8i8:
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl8
+; CHECK: eorv b[[REDUCE:[0-9]+]], [[PG]], z0.b
+; CHECK: fmov w0, s[[REDUCE]]
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+; No single instruction NEON EORV support. Use SVE.
+define i8 @eorv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: eorv_v16i8:
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl16
+; CHECK: eorv b[[REDUCE:[0-9]+]], [[PG]], z0.b
+; CHECK: fmov w0, s[[REDUCE]]
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @eorv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: eorv_v32i8:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @eorv_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: eorv_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
+; VBITS_GE_512-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_EQ_256-DAG: mov w[[A_HI:[0-9]+]], #32
+; VBITS_EQ_256-DAG: ld1b { [[LO:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1b { [[HI:z[0-9]+]].b }, [[PG]]/z, [x0, x[[A_HI]]]
+; VBITS_EQ_256-DAG: eor [[EOR:z[0-9]+]].d, [[LO]].d, [[HI]].d
+; VBITS_EQ_256-DAG: eorv b[[REDUCE:[0-9]+]], [[PG]], [[EOR]].b
+; VBITS_EQ_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_EQ_256-NEXT: ret
+
+  %op = load <64 x i8>, <64 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v64i8(<64 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @eorv_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: eorv_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
+; VBITS_GE_1024-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v128i8(<128 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @eorv_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: eorv_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
+; VBITS_GE_2048-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: eorv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.xor.v256i8(<256 x i8> %op)
+  ret i8 %res
+}
+
+; No single instruction NEON EORV support. Use SVE.
+define i16 @eorv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: eorv_v4i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl4
+; CHECK: eorv h[[REDUCE:[0-9]+]], [[PG]], z0.h
+; CHECK: fmov w0, s[[REDUCE]]
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+; No single instruction NEON EORV support. Use SVE.
+define i16 @eorv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: eorv_v8i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl8
+; CHECK: eorv h[[REDUCE:[0-9]+]], [[PG]], z0.h
+; CHECK: fmov w0, s[[REDUCE]]
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @eorv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: eorv_v16i16:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @eorv_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: eorv_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1h { [[LO:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1h { [[HI:z[0-9]+]].h }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: eor [[EOR:z[0-9]+]].d, [[LO]].d, [[HI]].d
+; VBITS_EQ_256-DAG: eorv h[[REDUCE:[0-9]+]], [[PG]], [[EOR]].h
+; VBITS_EQ_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v32i16(<32 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @eorv_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: eorv_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v64i16(<64 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @eorv_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: eorv_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: eorv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.xor.v128i16(<128 x i16> %op)
+  ret i16 %res
+}
+
+; No single instruction NEON EORV support. Use SVE.
+define i32 @eorv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: eorv_v2i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl2
+; CHECK: eorv [[REDUCE:s[0-9]+]], [[PG]], z0.s
+; CHECK: fmov w0, [[REDUCE]]
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+; No single instruction NEON EORV support. Use SVE.
+define i32 @eorv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: eorv_v4i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl4
+; CHECK: eorv [[REDUCE:s[0-9]+]], [[PG]], z0.s
+; CHECK: fmov w0, [[REDUCE]]
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @eorv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: eorv_v8i32:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @eorv_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: eorv_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_512-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: eor [[EOR:z[0-9]+]].d, [[LO]].d, [[HI]].d
+; VBITS_EQ_256-DAG: eorv [[REDUCE:s[0-9]+]], [[PG]], [[EOR]].s
+; VBITS_EQ_256-NEXT: fmov w0, [[REDUCE]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v16i32(<16 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @eorv_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: eorv_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_1024-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v32i32(<32 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @eorv_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: eorv_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: eorv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_2048-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.xor.v64i32(<64 x i32> %op)
+  ret i32 %res
+}
+
+; Nothing to do for single element vectors.
+define i64 @eorv_v1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: eorv_v1i64:
+; CHECK: fmov x0, d0
+; CHECK: ret
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64> %a)
+  ret i64 %res
+}
+
+; Use SVE for 128-bit vectors
+define i64 @eorv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: eorv_v2i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK: eorv [[REDUCE:d[0-9]+]], [[PG]], z0.d
+; CHECK: fmov x0, [[REDUCE]]
+; CHECK: ret
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @eorv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: eorv_v4i64:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @eorv_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: eorv_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: eor [[EOR:z[0-9]+]].d, [[LO]].d, [[HI]].d
+; VBITS_EQ_256-DAG: eorv [[REDUCE:d[0-9]+]], [[PG]], [[EOR]].d
+; VBITS_EQ_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v8i64(<8 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @eorv_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: eorv_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v16i64(<16 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @eorv_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: eorv_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: eorv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.xor.v32i64(<32 x i64> %op)
+  ret i64 %res
+}
+
 ;
 ; ORV
 ;
@@ -723,3 +1046,31 @@ declare i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64>)
 declare i64 @llvm.experimental.vector.reduce.or.v8i64(<8 x i64>)
 declare i64 @llvm.experimental.vector.reduce.or.v16i64(<16 x i64>)
 declare i64 @llvm.experimental.vector.reduce.or.v32i64(<32 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v256i8(<256 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v128i16(<128 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v64i32(<64 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v32i64(<32 x i64>)
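
To reproduce the new codegen locally, an invocation along these lines
should work (approximate; the authoritative RUN lines sit at the top of
sve-fixed-length-log-reduce.ll, and -aarch64-sve-vector-bits-min is the
flag the fixed-length SVE tests use to pin the vector length):

  llc -aarch64-sve-vector-bits-min=256 \
    llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll -o - \
    | FileCheck llvm/test/CodeGen/AArch64/sve-fixed-length-log-reduce.ll \
      --check-prefixes=CHECK,VBITS_GE_256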