[llvm] a43aebc - [SLP] Test for minimum/maximum reduction

Anna Thomas via llvm-commits <llvm-commits@lists.llvm.org>
Wed Jul 12 12:22:47 PDT 2023


Author: Anna Thomas
Date: 2023-07-12T15:22:37-04:00
New Revision: a43aebcd91c31a00dbe056a9bd829a26ae8dea75

URL: https://github.com/llvm/llvm-project/commit/a43aebcd91c31a00dbe056a9bd829a26ae8dea75
DIFF: https://github.com/llvm/llvm-project/commit/a43aebcd91c31a00dbe056a9bd829a26ae8dea75.diff

LOG: [SLP] Test for minimum/maximum reduction

Precommit the minimum/maximum tests from D154463. This contains tests where
we vectorize minimum/maximum calls, as well as tests where we currently do
not identify the reduction patterns.

Differential Revision: https://reviews.llvm.org/D155096

Added: 
    llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll b/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll
new file mode 100644
index 00000000000000..9e565ea27023be
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/fmaximum-fminimum.ll
@@ -0,0 +1,335 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=AVX
+declare float @llvm.maximum.f32(float, float)
+declare float @llvm.minimum.f32(float, float)
+declare double @llvm.maximum.f64(double, double)
+declare double @llvm.minimum.f64(double, double)
+
+@srcA64 = common global [8 x double] zeroinitializer, align 64
+@srcB64 = common global [8 x double] zeroinitializer, align 64
+@srcC64 = common global [8 x double] zeroinitializer, align 64
+@srcA32 = common global [16 x float] zeroinitializer, align 64
+@srcB32 = common global [16 x float] zeroinitializer, align 64
+@srcC32 = common global [16 x float] zeroinitializer, align 64
+@dst64 = common global [8 x double] zeroinitializer, align 64
+@dst32 = common global [16 x float] zeroinitializer, align 64
+
+define void @fmaximum_2f64() {
+; SSE-LABEL: define void @fmaximum_2f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_2f64
+; AVX-SAME: () #[[ATTR1:[0-9]+]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; AVX-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; AVX-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; AVX-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 8
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
+  %b0 = load double, ptr @srcB64, align 8
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  store double %fmaximum0, ptr @dst64, align 8
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
+  ret void
+}
+
+define void @fmaximum_4f64() {
+; SSE-LABEL: define void @fmaximum_4f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 8
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 8
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 8
+; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
+; SSE-NEXT:    [[TMP6:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
+; SSE-NEXT:    store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_4f64
+; AVX-SAME: () #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 8
+; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 8
+; AVX-NEXT:    [[TMP3:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
+; AVX-NEXT:    store <4 x double> [[TMP3]], ptr @dst64, align 8
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 8
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 8
+  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 8
+  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 8
+  %b0 = load double, ptr @srcB64, align 8
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 8
+  %b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 8
+  %b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 8
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  %fmaximum2 = call double @llvm.maximum.f64(double %a2, double %b2)
+  %fmaximum3 = call double @llvm.maximum.f64(double %a3, double %b3)
+  store double %fmaximum0, ptr @dst64, align 8
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 8
+  store double %fmaximum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 8
+  store double %fmaximum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 8
+  ret void
+}
+
+define void @fmaximum_8f64() {
+; SSE-LABEL: define void @fmaximum_8f64() {
+; SSE-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr @srcA64, align 4
+; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr @srcB64, align 4
+; SSE-NEXT:    [[TMP3:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP1]], <2 x double> [[TMP2]])
+; SSE-NEXT:    store <2 x double> [[TMP3]], ptr @dst64, align 4
+; SSE-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP6:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP4]], <2 x double> [[TMP5]])
+; SSE-NEXT:    store <2 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
+; SSE-NEXT:    [[TMP7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP9:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP7]], <2 x double> [[TMP8]])
+; SSE-NEXT:    store <2 x double> [[TMP9]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+; SSE-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
+; SSE-NEXT:    [[TMP11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
+; SSE-NEXT:    [[TMP12:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[TMP10]], <2 x double> [[TMP11]])
+; SSE-NEXT:    store <2 x double> [[TMP12]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
+; SSE-NEXT:    ret void
+;
+; AVX-LABEL: define void @fmaximum_8f64
+; AVX-SAME: () #[[ATTR1]] {
+; AVX-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @srcA64, align 4
+; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr @srcB64, align 4
+; AVX-NEXT:    [[TMP3:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP1]], <4 x double> [[TMP2]])
+; AVX-NEXT:    store <4 x double> [[TMP3]], ptr @dst64, align 4
+; AVX-NEXT:    [[TMP4:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+; AVX-NEXT:    [[TMP5:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+; AVX-NEXT:    [[TMP6:%.*]] = call <4 x double> @llvm.maximum.v4f64(<4 x double> [[TMP4]], <4 x double> [[TMP5]])
+; AVX-NEXT:    store <4 x double> [[TMP6]], ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+; AVX-NEXT:    ret void
+;
+  %a0 = load double, ptr @srcA64, align 4
+  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 1), align 4
+  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 2), align 4
+  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 3), align 4
+  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 4), align 4
+  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 5), align 4
+  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 6), align 4
+  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcA64, i32 0, i64 7), align 4
+  %b0 = load double, ptr @srcB64, align 4
+  %b1 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 1), align 4
+  %b2 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 2), align 4
+  %b3 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 3), align 4
+  %b4 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 4), align 4
+  %b5 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 5), align 4
+  %b6 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 6), align 4
+  %b7 = load double, ptr getelementptr inbounds ([8 x double], ptr @srcB64, i32 0, i64 7), align 4
+  %fmaximum0 = call double @llvm.maximum.f64(double %a0, double %b0)
+  %fmaximum1 = call double @llvm.maximum.f64(double %a1, double %b1)
+  %fmaximum2 = call double @llvm.maximum.f64(double %a2, double %b2)
+  %fmaximum3 = call double @llvm.maximum.f64(double %a3, double %b3)
+  %fmaximum4 = call double @llvm.maximum.f64(double %a4, double %b4)
+  %fmaximum5 = call double @llvm.maximum.f64(double %a5, double %b5)
+  %fmaximum6 = call double @llvm.maximum.f64(double %a6, double %b6)
+  %fmaximum7 = call double @llvm.maximum.f64(double %a7, double %b7)
+  store double %fmaximum0, ptr @dst64, align 4
+  store double %fmaximum1, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 1), align 4
+  store double %fmaximum2, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 2), align 4
+  store double %fmaximum3, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 3), align 4
+  store double %fmaximum4, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 4), align 4
+  store double %fmaximum5, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 5), align 4
+  store double %fmaximum6, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 6), align 4
+  store double %fmaximum7, ptr getelementptr inbounds ([8 x double], ptr @dst64, i32 0, i64 7), align 4
+  ret void
+}
+
+define double @reduction_v2f64(ptr %p) {
+; SSE-LABEL: define double @reduction_v2f64
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[G1:%.*]] = getelementptr inbounds double, ptr [[P]], i64 1
+; SSE-NEXT:    [[T0:%.*]] = load double, ptr [[P]], align 4
+; SSE-NEXT:    [[T1:%.*]] = load double, ptr [[G1]], align 4
+; SSE-NEXT:    [[M1:%.*]] = tail call double @llvm.maximum.f64(double [[T1]], double [[T0]])
+; SSE-NEXT:    ret double [[M1]]
+;
+; AVX-LABEL: define double @reduction_v2f64
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[G1:%.*]] = getelementptr inbounds double, ptr [[P]], i64 1
+; AVX-NEXT:    [[T0:%.*]] = load double, ptr [[P]], align 4
+; AVX-NEXT:    [[T1:%.*]] = load double, ptr [[G1]], align 4
+; AVX-NEXT:    [[M1:%.*]] = tail call double @llvm.maximum.f64(double [[T1]], double [[T0]])
+; AVX-NEXT:    ret double [[M1]]
+;
+  %g1 = getelementptr inbounds double, ptr %p, i64 1
+  %t0 = load double, ptr %p, align 4
+  %t1 = load double, ptr %g1, align 4
+  %m1 = tail call double @llvm.maximum.f64(double %t1, double %t0)
+  ret double %m1
+}
+
+define float @reduction_v4f32(ptr %p) {
+; SSE-LABEL: define float @reduction_v4f32
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[G1:%.*]] = getelementptr inbounds float, ptr [[P]], i64 1
+; SSE-NEXT:    [[G2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 2
+; SSE-NEXT:    [[G3:%.*]] = getelementptr inbounds float, ptr [[P]], i64 3
+; SSE-NEXT:    [[T0:%.*]] = load float, ptr [[P]], align 4
+; SSE-NEXT:    [[T1:%.*]] = load float, ptr [[G1]], align 4
+; SSE-NEXT:    [[T2:%.*]] = load float, ptr [[G2]], align 4
+; SSE-NEXT:    [[T3:%.*]] = load float, ptr [[G3]], align 4
+; SSE-NEXT:    [[M1:%.*]] = tail call float @llvm.maximum.f32(float [[T1]], float [[T0]])
+; SSE-NEXT:    [[M2:%.*]] = tail call float @llvm.maximum.f32(float [[T2]], float [[M1]])
+; SSE-NEXT:    [[M3:%.*]] = tail call float @llvm.maximum.f32(float [[T3]], float [[M2]])
+; SSE-NEXT:    ret float [[M3]]
+;
+; AVX-LABEL: define float @reduction_v4f32
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[G1:%.*]] = getelementptr inbounds float, ptr [[P]], i64 1
+; AVX-NEXT:    [[G2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 2
+; AVX-NEXT:    [[G3:%.*]] = getelementptr inbounds float, ptr [[P]], i64 3
+; AVX-NEXT:    [[T0:%.*]] = load float, ptr [[P]], align 4
+; AVX-NEXT:    [[T1:%.*]] = load float, ptr [[G1]], align 4
+; AVX-NEXT:    [[T2:%.*]] = load float, ptr [[G2]], align 4
+; AVX-NEXT:    [[T3:%.*]] = load float, ptr [[G3]], align 4
+; AVX-NEXT:    [[M1:%.*]] = tail call float @llvm.maximum.f32(float [[T1]], float [[T0]])
+; AVX-NEXT:    [[M2:%.*]] = tail call float @llvm.maximum.f32(float [[T2]], float [[M1]])
+; AVX-NEXT:    [[M3:%.*]] = tail call float @llvm.maximum.f32(float [[T3]], float [[M2]])
+; AVX-NEXT:    ret float [[M3]]
+;
+  %g1 = getelementptr inbounds float, ptr %p, i64 1
+  %g2 = getelementptr inbounds float, ptr %p, i64 2
+  %g3 = getelementptr inbounds float, ptr %p, i64 3
+  %t0 = load float, ptr %p, align 4
+  %t1 = load float, ptr %g1, align 4
+  %t2 = load float, ptr %g2, align 4
+  %t3 = load float, ptr %g3, align 4
+  %m1 = tail call float @llvm.maximum.f32(float %t1, float %t0)
+  %m2 = tail call float @llvm.maximum.f32(float %t2, float %m1)
+  %m3 = tail call float @llvm.maximum.f32(float %t3, float %m2)
+  ret float %m3
+}
+
+define double @reduction_v4f64_fminimum(ptr %p) {
+; SSE-LABEL: define double @reduction_v4f64_fminimum
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[G1:%.*]] = getelementptr inbounds double, ptr [[P]], i64 1
+; SSE-NEXT:    [[G2:%.*]] = getelementptr inbounds double, ptr [[P]], i64 2
+; SSE-NEXT:    [[G3:%.*]] = getelementptr inbounds double, ptr [[P]], i64 3
+; SSE-NEXT:    [[T0:%.*]] = load double, ptr [[P]], align 4
+; SSE-NEXT:    [[T1:%.*]] = load double, ptr [[G1]], align 4
+; SSE-NEXT:    [[T2:%.*]] = load double, ptr [[G2]], align 4
+; SSE-NEXT:    [[T3:%.*]] = load double, ptr [[G3]], align 4
+; SSE-NEXT:    [[M1:%.*]] = tail call double @llvm.minimum.f64(double [[T1]], double [[T0]])
+; SSE-NEXT:    [[M2:%.*]] = tail call double @llvm.minimum.f64(double [[T2]], double [[M1]])
+; SSE-NEXT:    [[M3:%.*]] = tail call double @llvm.minimum.f64(double [[T3]], double [[M2]])
+; SSE-NEXT:    ret double [[M3]]
+;
+; AVX-LABEL: define double @reduction_v4f64_fminimum
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[G1:%.*]] = getelementptr inbounds double, ptr [[P]], i64 1
+; AVX-NEXT:    [[G2:%.*]] = getelementptr inbounds double, ptr [[P]], i64 2
+; AVX-NEXT:    [[G3:%.*]] = getelementptr inbounds double, ptr [[P]], i64 3
+; AVX-NEXT:    [[T0:%.*]] = load double, ptr [[P]], align 4
+; AVX-NEXT:    [[T1:%.*]] = load double, ptr [[G1]], align 4
+; AVX-NEXT:    [[T2:%.*]] = load double, ptr [[G2]], align 4
+; AVX-NEXT:    [[T3:%.*]] = load double, ptr [[G3]], align 4
+; AVX-NEXT:    [[M1:%.*]] = tail call double @llvm.minimum.f64(double [[T1]], double [[T0]])
+; AVX-NEXT:    [[M2:%.*]] = tail call double @llvm.minimum.f64(double [[T2]], double [[M1]])
+; AVX-NEXT:    [[M3:%.*]] = tail call double @llvm.minimum.f64(double [[T3]], double [[M2]])
+; AVX-NEXT:    ret double [[M3]]
+;
+  %g1 = getelementptr inbounds double, ptr %p, i64 1
+  %g2 = getelementptr inbounds double, ptr %p, i64 2
+  %g3 = getelementptr inbounds double, ptr %p, i64 3
+  %t0 = load double, ptr %p, align 4
+  %t1 = load double, ptr %g1, align 4
+  %t2 = load double, ptr %g2, align 4
+  %t3 = load double, ptr %g3, align 4
+  %m1 = tail call double @llvm.minimum.f64(double %t1, double %t0)
+  %m2 = tail call double @llvm.minimum.f64(double %t2, double %m1)
+  %m3 = tail call double @llvm.minimum.f64(double %t3, double %m2)
+  ret double %m3
+}
+
+define float @reduction_v8f32_fminimum(ptr %p) {
+; SSE-LABEL: define float @reduction_v8f32_fminimum
+; SSE-SAME: (ptr [[P:%.*]]) {
+; SSE-NEXT:    [[G1:%.*]] = getelementptr inbounds float, ptr [[P]], i64 1
+; SSE-NEXT:    [[G2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 2
+; SSE-NEXT:    [[G3:%.*]] = getelementptr inbounds float, ptr [[P]], i64 3
+; SSE-NEXT:    [[G4:%.*]] = getelementptr inbounds float, ptr [[P]], i64 4
+; SSE-NEXT:    [[G5:%.*]] = getelementptr inbounds float, ptr [[P]], i64 5
+; SSE-NEXT:    [[G6:%.*]] = getelementptr inbounds float, ptr [[P]], i64 6
+; SSE-NEXT:    [[G7:%.*]] = getelementptr inbounds float, ptr [[P]], i64 7
+; SSE-NEXT:    [[T0:%.*]] = load float, ptr [[P]], align 4
+; SSE-NEXT:    [[T1:%.*]] = load float, ptr [[G1]], align 4
+; SSE-NEXT:    [[T2:%.*]] = load float, ptr [[G2]], align 4
+; SSE-NEXT:    [[T3:%.*]] = load float, ptr [[G3]], align 4
+; SSE-NEXT:    [[T4:%.*]] = load float, ptr [[G4]], align 4
+; SSE-NEXT:    [[T5:%.*]] = load float, ptr [[G5]], align 4
+; SSE-NEXT:    [[T6:%.*]] = load float, ptr [[G6]], align 4
+; SSE-NEXT:    [[T7:%.*]] = load float, ptr [[G7]], align 4
+; SSE-NEXT:    [[M1:%.*]] = tail call float @llvm.minimum.f32(float [[T1]], float [[T0]])
+; SSE-NEXT:    [[M2:%.*]] = tail call float @llvm.minimum.f32(float [[T2]], float [[M1]])
+; SSE-NEXT:    [[M3:%.*]] = tail call float @llvm.minimum.f32(float [[T3]], float [[M2]])
+; SSE-NEXT:    [[M4:%.*]] = tail call float @llvm.minimum.f32(float [[T4]], float [[M3]])
+; SSE-NEXT:    [[M5:%.*]] = tail call float @llvm.minimum.f32(float [[M4]], float [[T6]])
+; SSE-NEXT:    [[M6:%.*]] = tail call float @llvm.minimum.f32(float [[M5]], float [[T5]])
+; SSE-NEXT:    [[M7:%.*]] = tail call float @llvm.minimum.f32(float [[M6]], float [[T7]])
+; SSE-NEXT:    ret float [[M7]]
+;
+; AVX-LABEL: define float @reduction_v8f32_fminimum
+; AVX-SAME: (ptr [[P:%.*]]) #[[ATTR1]] {
+; AVX-NEXT:    [[G1:%.*]] = getelementptr inbounds float, ptr [[P]], i64 1
+; AVX-NEXT:    [[G2:%.*]] = getelementptr inbounds float, ptr [[P]], i64 2
+; AVX-NEXT:    [[G3:%.*]] = getelementptr inbounds float, ptr [[P]], i64 3
+; AVX-NEXT:    [[G4:%.*]] = getelementptr inbounds float, ptr [[P]], i64 4
+; AVX-NEXT:    [[G5:%.*]] = getelementptr inbounds float, ptr [[P]], i64 5
+; AVX-NEXT:    [[G6:%.*]] = getelementptr inbounds float, ptr [[P]], i64 6
+; AVX-NEXT:    [[G7:%.*]] = getelementptr inbounds float, ptr [[P]], i64 7
+; AVX-NEXT:    [[T0:%.*]] = load float, ptr [[P]], align 4
+; AVX-NEXT:    [[T1:%.*]] = load float, ptr [[G1]], align 4
+; AVX-NEXT:    [[T2:%.*]] = load float, ptr [[G2]], align 4
+; AVX-NEXT:    [[T3:%.*]] = load float, ptr [[G3]], align 4
+; AVX-NEXT:    [[T4:%.*]] = load float, ptr [[G4]], align 4
+; AVX-NEXT:    [[T5:%.*]] = load float, ptr [[G5]], align 4
+; AVX-NEXT:    [[T6:%.*]] = load float, ptr [[G6]], align 4
+; AVX-NEXT:    [[T7:%.*]] = load float, ptr [[G7]], align 4
+; AVX-NEXT:    [[M1:%.*]] = tail call float @llvm.minimum.f32(float [[T1]], float [[T0]])
+; AVX-NEXT:    [[M2:%.*]] = tail call float @llvm.minimum.f32(float [[T2]], float [[M1]])
+; AVX-NEXT:    [[M3:%.*]] = tail call float @llvm.minimum.f32(float [[T3]], float [[M2]])
+; AVX-NEXT:    [[M4:%.*]] = tail call float @llvm.minimum.f32(float [[T4]], float [[M3]])
+; AVX-NEXT:    [[M5:%.*]] = tail call float @llvm.minimum.f32(float [[M4]], float [[T6]])
+; AVX-NEXT:    [[M6:%.*]] = tail call float @llvm.minimum.f32(float [[M5]], float [[T5]])
+; AVX-NEXT:    [[M7:%.*]] = tail call float @llvm.minimum.f32(float [[M6]], float [[T7]])
+; AVX-NEXT:    ret float [[M7]]
+;
+  %g1 = getelementptr inbounds float, ptr %p, i64 1
+  %g2 = getelementptr inbounds float, ptr %p, i64 2
+  %g3 = getelementptr inbounds float, ptr %p, i64 3
+  %g4 = getelementptr inbounds float, ptr %p, i64 4
+  %g5 = getelementptr inbounds float, ptr %p, i64 5
+  %g6 = getelementptr inbounds float, ptr %p, i64 6
+  %g7 = getelementptr inbounds float, ptr %p, i64 7
+  %t0 = load float, ptr %p, align 4
+  %t1 = load float, ptr %g1, align 4
+  %t2 = load float, ptr %g2, align 4
+  %t3 = load float, ptr %g3, align 4
+  %t4 = load float, ptr %g4, align 4
+  %t5 = load float, ptr %g5, align 4
+  %t6 = load float, ptr %g6, align 4
+  %t7 = load float, ptr %g7, align 4
+  %m1 = tail call float @llvm.minimum.f32(float %t1, float %t0)
+  %m2 = tail call float @llvm.minimum.f32(float %t2, float %m1)
+  %m3 = tail call float @llvm.minimum.f32(float %t3, float %m2)
+  %m4 = tail call float @llvm.minimum.f32(float %t4, float %m3)
+  %m5 = tail call float @llvm.minimum.f32(float %m4, float %t6)
+  %m6 = tail call float @llvm.minimum.f32(float %m5, float %t5)
+  %m7 = tail call float @llvm.minimum.f32(float %m6, float %t7)
+  ret float %m7
+}
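
For context, a minimal sketch (not part of this commit) of what the reduction
cases above could become once D154463 lands: the scalar fmaximum chain in
@reduction_v4f32 would collapse into a single vector load plus a reduction
intrinsic. The function name below is illustrative, and this assumes SLP emits
the existing llvm.vector.reduce.fmaximum intrinsic for this pattern:

; Hypothetical vectorized form of @reduction_v4f32 (illustrative only).
define float @reduction_v4f32_vectorized(ptr %p) {
  ; Load all four adjacent floats as one vector ...
  %v = load <4 x float>, ptr %p, align 4
  ; ... and reduce them with a single fmaximum reduction call.
  %r = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %v)
  ret float %r
}
declare float @llvm.vector.reduce.fmaximum.v4f32(<4 x float>)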