[llvm] bc2e843 - [ARM] A couple of small MVE reduction tests from intrinsics. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Sun Feb 14 10:26:34 PST 2021


Author: David Green
Date: 2021-02-14T18:26:22Z
New Revision: bc2e843839ee53800cfd29f7f893bca2b9cbe515

URL: https://github.com/llvm/llvm-project/commit/bc2e843839ee53800cfd29f7f893bca2b9cbe515
DIFF: https://github.com/llvm/llvm-project/commit/bc2e843839ee53800cfd29f7f893bca2b9cbe515.diff

LOG: [ARM] A couple of small MVE reduction tests from intrinsics. NFC

Also added a PhaseOrdering test to make sure the reductions are not
broken by VectorCombine cost changes.
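
For context, IR of this shape typically corresponds to source along the
lines of the sketch below (a hypothetical reconstruction using the ACLE
MVE intrinsics, compiled with fast-math; it is not the original source
of these tests):

  #include <arm_mve.h>

  /* Sum the four lanes of an MVE vector. With fast-math the scalar add
     chain can be reassociated, which is the pattern the new
     vecAddAcrossF32Mve test checks is still lowered to plain vadd.f32
     instructions rather than being pessimized by vectorization. */
  float vecAddAcrossF32Mve(float32x4_t in) {
    return vgetq_lane_f32(in, 0) + vgetq_lane_f32(in, 1) +
           vgetq_lane_f32(in, 2) + vgetq_lane_f32(in, 3);
  }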

Added: 
    llvm/test/Transforms/PhaseOrdering/arm_floatreduce.ll

Modified: 
    llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
    llvm/test/CodeGen/Thumb2/mve-float32regloops.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index 1d44639a48f9..82005b9b00a5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -1618,6 +1618,28 @@ do.end:                                           ; preds = %if.end
   ret void
 }
 
+define arm_aapcs_vfpcc half @vecAddAcrossF16Mve(<8 x half> %in) {
+; CHECK-LABEL: vecAddAcrossF16Mve:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrev32.16 q1, q0
+; CHECK-NEXT:    vadd.f16 q0, q1, q0
+; CHECK-NEXT:    vrev64.32 q1, q0
+; CHECK-NEXT:    vadd.f16 q0, q0, q1
+; CHECK-NEXT:    vadd.f16 s0, s0, s2
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x half> %in, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  %1 = fadd fast <8 x half> %0, %in
+  %2 = bitcast <8 x half> %1 to <4 x i32>
+  %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+  %4 = bitcast <4 x i32> %3 to <8 x half>
+  %5 = fadd fast <8 x half> %1, %4
+  %6 = extractelement <8 x half> %5, i32 0
+  %7 = extractelement <8 x half> %5, i32 4
+  %add = fadd fast half %6, %7
+  ret half %add
+}
+
 declare { i32, <8 x i16> } @llvm.arm.mve.vshlc.v8i16(<8 x i16>, i32, i32)
 declare void @llvm.assume(i1)
 declare <8 x i1> @llvm.arm.mve.vctp16(i32)

diff --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index 073dd8bdb0b7..0156cfe25f8e 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -2218,6 +2218,25 @@ do.end:                                           ; preds = %if.end
   ret void
 }
 
+define arm_aapcs_vfpcc float @vecAddAcrossF32Mve(<4 x float> %in) {
+; CHECK-LABEL: vecAddAcrossF32Mve:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vadd.f32 s4, s0, s1
+; CHECK-NEXT:    vadd.f32 s4, s4, s2
+; CHECK-NEXT:    vadd.f32 s0, s4, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <4 x float> %in, i32 0
+  %1 = extractelement <4 x float> %in, i32 1
+  %add = fadd fast float %0, %1
+  %2 = extractelement <4 x float> %in, i32 2
+  %add1 = fadd fast float %add, %2
+  %3 = extractelement <4 x float> %in, i32 3
+  %add2 = fadd fast float %add1, %3
+  ret float %add2
+}
+
+
 declare { i32, <4 x i32> } @llvm.arm.mve.vshlc.v4i32(<4 x i32>, i32, i32) #1
 declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
 declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)

diff --git a/llvm/test/Transforms/PhaseOrdering/arm_floatreduce.ll b/llvm/test/Transforms/PhaseOrdering/arm_floatreduce.ll
new file mode 100644
index 000000000000..94a907cd89f8
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/arm_floatreduce.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -O3 -S                   | FileCheck %s
+; RUN: opt < %s -passes='default<O3>' -S | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv8.1m.main-none-none-eabi"
+
+define arm_aapcs_vfpcc half @vecAddAcrossF16Mve(<8 x half> %in) #0 {
+; CHECK-LABEL: @vecAddAcrossF16Mve(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <8 x half> [[IN:%.*]], <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd fast <8 x half> [[TMP0]], [[IN]]
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x half> [[TMP1]] to <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <8 x half>
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd fast <8 x half> [[TMP1]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <8 x half> [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <8 x half> [[TMP5]], i32 4
+; CHECK-NEXT:    [[ADD:%.*]] = fadd fast half [[TMP6]], [[TMP7]]
+; CHECK-NEXT:    ret half [[ADD]]
+;
+entry:
+  %0 = shufflevector <8 x half> %in, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  %1 = fadd fast <8 x half> %0, %in
+  %2 = bitcast <8 x half> %1 to <4 x i32>
+  %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  %4 = bitcast <4 x i32> %3 to <8 x half>
+  %5 = fadd fast <8 x half> %1, %4
+  %6 = extractelement <8 x half> %5, i32 0
+  %7 = extractelement <8 x half> %5, i32 4
+  %add = fadd fast half %6, %7
+  ret half %add
+}
+
+define arm_aapcs_vfpcc float @vecAddAcrossF32Mve(<4 x float> %in) {
+; CHECK-LABEL: @vecAddAcrossF32Mve(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <4 x float> [[IN:%.*]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <4 x float> [[IN]], i32 1
+; CHECK-NEXT:    [[ADD:%.*]] = fadd fast float [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[IN]], i32 2
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd fast float [[ADD]], [[TMP2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[IN]], i32 3
+; CHECK-NEXT:    [[ADD2:%.*]] = fadd fast float [[ADD1]], [[TMP3]]
+; CHECK-NEXT:    ret float [[ADD2]]
+;
+entry:
+  %0 = extractelement <4 x float> %in, i32 0
+  %1 = extractelement <4 x float> %in, i32 1
+  %add = fadd fast float %0, %1
+  %2 = extractelement <4 x float> %in, i32 2
+  %add1 = fadd fast float %add, %2
+  %3 = extractelement <4 x float> %in, i32 3
+  %add2 = fadd fast float %add1, %3
+  ret float %add2
+}
+
+attributes #0 = { "target-features"="+mve.fp" }


        

