[llvm] e8413ac - [AArch64] Expand some vector of i64 reductions on NEON

Cameron McInally via llvm-commits <llvm-commits at lists.llvm.org>
Wed Sep 23 14:01:48 PDT 2020


Author: Cameron McInally
Date: 2020-09-23T16:01:24-05:00
New Revision: e8413ac97f6ca2b1897cc9555ad9b0194456629f

URL: https://github.com/llvm/llvm-project/commit/e8413ac97f6ca2b1897cc9555ad9b0194456629f
DIFF: https://github.com/llvm/llvm-project/commit/e8413ac97f6ca2b1897cc9555ad9b0194456629f.diff

LOG: [AArch64] Expand some vector of i64 reductions on NEON

With the exception of VECREDUCE_ADD, there are no NEON instructions that support i64 vector reductions. This patch removes the Custom lowerings for those operations, so they are expanded instead, and adds test coverage to confirm the expansion.

Differential Revision: https://reviews.llvm.org/D88161
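
For illustration (this example is not part of the original commit; the function name is chosen for demonstration), the kind of reduction affected by this change looks like the following LLVM IR. With the Custom lowering removed for v2i64, SelectionDAG expands the UMAX reduction into a scalar extract/compare/select sequence (mov, fmov, cmp, csel), as the updated vecreduce-umax-legalization.ll test below checks:

    ; Minimal sketch mirroring the new test_v2i64 case added in this patch.
    declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64>)

    define i64 @umax_v2i64(<2 x i64> %v) {
      ; Expanded to scalar code on NEON; only VECREDUCE_ADD keeps a
      ; vector lowering (addp) for v2i64.
      %r = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %v)
      ret i64 %r
    }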

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/aarch64-addv.ll
    llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 86354f2c06bc..772f88d4f9d1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -889,26 +889,30 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
     setOperationAction(ISD::MUL, MVT::v2i64, Custom);
 
+    // Saturates
     for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
                     MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
-      // Vector reductions
-      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
-      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
-      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
-      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
-      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
-
-      // Saturates
       setOperationAction(ISD::SADDSAT, VT, Legal);
       setOperationAction(ISD::UADDSAT, VT, Legal);
       setOperationAction(ISD::SSUBSAT, VT, Legal);
       setOperationAction(ISD::USUBSAT, VT, Legal);
     }
+
+    // Vector reductions
     for (MVT VT : { MVT::v4f16, MVT::v2f32,
                     MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
     }
+    for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32,
+                    MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
+      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
+    }
+    setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom);
 
     setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
     setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);

diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index 2fd57f70a65d..f1e0f299bd49 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -33,6 +33,7 @@ define i32 @add_S( <4 x i32>* %arr)  {
 define i64 @add_D(<2 x i64>* %arr)  {
 ; CHECK-LABEL: add_D
 ; CHECK-NOT: addv
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
   %bin.rdx = load <2 x i64>, <2 x i64>* %arr
   %r = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
   ret i64 %r

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
index 99b6ebe2f1da..7c9415211681 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
@@ -9,6 +9,7 @@ declare i32 @llvm.experimental.vector.reduce.umax.v1i32(<1 x i32> %a)
 declare i64 @llvm.experimental.vector.reduce.umax.v1i64(<1 x i64> %a)
 declare i128 @llvm.experimental.vector.reduce.umax.v1i128(<1 x i128> %a)
 
+declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %a)
 declare i8 @llvm.experimental.vector.reduce.umax.v3i8(<3 x i8> %a)
 declare i8 @llvm.experimental.vector.reduce.umax.v9i8(<9 x i8> %a)
 declare i32 @llvm.experimental.vector.reduce.umax.v3i32(<3 x i32> %a)
@@ -82,6 +83,19 @@ define i128 @test_v1i128(<1 x i128> %a) nounwind {
   ret i128 %b
 }
 
+; No i64 vector support for UMAX.
+define i64 @test_v2i64(<2 x i64> %a) nounwind {
+; CHECK-LABEL: test_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:   mov     x8, v0.d[1]
+; CHECK-NEXT:   fmov    x9, d0
+; CHECK-NEXT:   cmp     x9, x8
+; CHECK-NEXT:   csel    x0, x9, x8, hi
+; CHECK-NEXT:   ret
+  %b = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %a)
+  ret i64 %b
+}
+
 define i8 @test_v3i8(<3 x i8> %a) nounwind {
 ; CHECK-LABEL: test_v3i8:
 ; CHECK:       // %bb.0:


        

