[llvm] e119a93 - [AArch64][SME]: Add precursory tests for D138309

Hassnaa Hamdi via llvm-commits <llvm-commits at lists.llvm.org>
Thu Nov 24 09:11:33 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-24T17:11:23Z
New Revision: e119a93d4e471d848747baafe88d0d16328c7490

URL: https://github.com/llvm/llvm-project/commit/e119a93d4e471d848747baafe88d0d16328c7490
DIFF: https://github.com/llvm/llvm-project/commit/e119a93d4e471d848747baafe88d0d16328c7490.diff

LOG: [AArch64][SME]: Add precursory tests for D138309

Add test files:
 - int-reduce.ll
 - fp-reduce.ll
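
The CHECK lines in both files are autogenerated by
utils/update_llc_test_checks.py; a sketch of regenerating them, assuming llc
from an in-tree build is found on PATH or passed via --llc-binary:

    python llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll \
        llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll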

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
new file mode 100644
index 0000000000000..89a40e01947ee
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -0,0 +1,474 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
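+;
+; -force-streaming-compatible-sve requests code that stays legal in SME
+; streaming mode, where most NEON instructions are unavailable. These are
+; baseline checks ahead of D138309, so several of the reductions below still
+; select NEON; D138309 is expected to update them.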
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; FADDA
+;
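+; llvm.vector.reduce.fadd with a start value and no reassociation flags is a
+; strictly-ordered reduction, which is why it lowers to SVE FADDA (the
+; strictly-ordered floating-point add across a vector) rather than a pairwise
+; tree of adds.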
+
+define half @fadda_v4f16(half %start, <4 x half> %a) #0 {
+; CHECK-LABEL: fadda_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    fadda h0, p0, h0, z1.h
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a)
+  ret half %res
+}
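+
+; The "// kill:" lines in the checks are register-allocation artifacts: they
+; mark points where a NEON register (h0, d1, q1, ...) is reused as the SVE Z
+; register that contains it; they emit no actual instructions.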
+
+define half @fadda_v8f16(half %start, <8 x half> %a) #0 {
+; CHECK-LABEL: fadda_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    fadda h0, p0, h0, z1.h
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a)
+  ret half %res
+}
+
+define half @fadda_v16f16(half %start, <16 x half>* %a) #0 {
+; CHECK-LABEL: fadda_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    fadda h0, p0, h0, z1.h
+; CHECK-NEXT:    fadda h0, p0, h0, z2.h
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
+  ret half %res
+}
+
+define float @fadda_v2f32(float %start, <2 x float> %a) #0 {
+; CHECK-LABEL: fadda_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    fadda s0, p0, s0, z1.s
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a)
+  ret float %res
+}
+
+define float @fadda_v4f32(float %start, <4 x float> %a) #0 {
+; CHECK-LABEL: fadda_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    fadda s0, p0, s0, z1.s
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a)
+  ret float %res
+}
+
+define float @fadda_v8f32(float %start, <8 x float>* %a) #0 {
+; CHECK-LABEL: fadda_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fadda s0, p0, s0, z1.s
+; CHECK-NEXT:    fadda s0, p0, s0, z2.s
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
+  ret float %res
+}
+
+define double @fadda_v1f64(double %start, <1 x double> %a) #0 {
+; CHECK-LABEL: fadda_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fadd.v1f64(double %start, <1 x double> %a)
+  ret double %res
+}
+
+define double @fadda_v2f64(double %start, <2 x double> %a) #0 {
+; CHECK-LABEL: fadda_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    fadda d0, p0, d0, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a)
+  ret double %res
+}
+
+define double @fadda_v4f64(double %start, <4 x double>* %a) #0 {
+; CHECK-LABEL: fadda_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fadda d0, p0, d0, z1.d
+; CHECK-NEXT:    fadda d0, p0, d0, z2.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)
+  ret double %res
+}
+
+;
+; FADDV
+;
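+; These reductions carry the 'fast' flag, so reassociation is allowed and the
+; lowering may use the unordered SVE FADDV (or a NEON pairwise FADDP for the
+; narrower cases) instead of the strictly-ordered FADDA.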
+
+define half @faddv_v4f16(half %start, <4 x half> %a) #0 {
+; CHECK-LABEL: faddv_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    faddv h1, p0, z1.h
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    ret
+  %res = call fast half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a)
+  ret half %res
+}
+
+define half @faddv_v8f16(half %start, <8 x half> %a) #0 {
+; CHECK-LABEL: faddv_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    faddv h1, p0, z1.h
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    ret
+  %res = call fast half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a)
+  ret half %res
+}
+
+define half @faddv_v16f16(half %start, <16 x half>* %a) #0 {
+; CHECK-LABEL: faddv_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q2, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    fadd z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    faddv h1, p0, z1.h
+; CHECK-NEXT:    fadd h0, h0, h1
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call fast half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
+  ret half %res
+}
+
+define float @faddv_v2f32(float %start, <2 x float> %a) #0 {
+; CHECK-LABEL: faddv_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddp s1, v1.2s
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %res = call fast float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a)
+  ret float %res
+}
+
+define float @faddv_v4f32(float %start, <4 x float> %a) #0 {
+; CHECK-LABEL: faddv_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    faddv s1, p0, z1.s
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %res = call fast float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a)
+  ret float %res
+}
+
+define float @faddv_v8f32(float %start, <8 x float>* %a) #0 {
+; CHECK-LABEL: faddv_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q2, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    faddv s1, p0, z1.s
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call fast float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
+  ret float %res
+}
+
+define double @faddv_v1f64(double %start, <1 x double> %a) #0 {
+; CHECK-LABEL: faddv_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %res = call fast double @llvm.vector.reduce.fadd.v1f64(double %start, <1 x double> %a)
+  ret double %res
+}
+
+define double @faddv_v2f64(double %start, <2 x double> %a) #0 {
+; CHECK-LABEL: faddv_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    faddp d1, v1.2d
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %res = call fast double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a)
+  ret double %res
+}
+
+define double @faddv_v4f64(double %start, <4 x double>* %a) #0 {
+; CHECK-LABEL: faddv_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q2, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    faddp d1, v1.2d
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call fast double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)
+  ret double %res
+}
+
+;
+; FMAXV
+;
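+; llvm.vector.reduce.fmax has llvm.maxnum (IEEE maxNum) semantics, hence the
+; FMAXNM* forms below rather than plain FMAX*.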
+
+define half @fmaxv_v4f16(<4 x half> %a) #0 {
+; CHECK-LABEL: fmaxv_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmv h0, v0.4h
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %a)
+  ret half %res
+}
+
+define half @fmaxv_v8f16(<8 x half> %a) #0 {
+; CHECK-LABEL: fmaxv_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmv h0, v0.8h
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fmax.v8f16(<8 x half> %a)
+  ret half %res
+}
+
+define half @fmaxv_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: fmaxv_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmaxnmv h0, v0.8h
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %op)
+  ret half %res
+}
+
+define float @fmaxv_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: fmaxv_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmp s0, v0.2s
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %a)
+  ret float %res
+}
+
+define float @fmaxv_v4f32(<4 x float> %a) #0 {
+; CHECK-LABEL: fmaxv_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmv s0, v0.4s
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a)
+  ret float %res
+}
+
+define float @fmaxv_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: fmaxv_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmaxnmv s0, v0.4s
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %op)
+  ret float %res
+}
+
+define double @fmaxv_v1f64(<1 x double> %a) #0 {
+; CHECK-LABEL: fmaxv_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fmax.v1f64(<1 x double> %a)
+  ret double %res
+}
+
+define double @fmaxv_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: fmaxv_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnmp d0, v0.2d
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %a)
+  ret double %res
+}
+
+define double @fmaxv_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: fmaxv_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmaxnmp d0, v0.2d
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %op)
+  ret double %res
+}
+
+;
+; FMINV
+;
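+; As with FMAXV above, llvm.vector.reduce.fmin has llvm.minnum semantics, so
+; the FMINNM* forms are used.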
+
+define half @fminv_v4f16(<4 x half> %a) #0 {
+; CHECK-LABEL: fminv_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmv h0, v0.4h
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %a)
+  ret half %res
+}
+
+define half @fminv_v8f16(<8 x half> %a) #0 {
+; CHECK-LABEL: fminv_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmv h0, v0.8h
+; CHECK-NEXT:    ret
+  %res = call half @llvm.vector.reduce.fmin.v8f16(<8 x half> %a)
+  ret half %res
+}
+
+define half @fminv_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: fminv_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fminnmv h0, v0.8h
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %op)
+  ret half %res
+}
+
+define float @fminv_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: fminv_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmp s0, v0.2s
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %a)
+  ret float %res
+}
+
+define float @fminv_v4f32(<4 x float> %a) #0 {
+; CHECK-LABEL: fminv_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmv s0, v0.4s
+; CHECK-NEXT:    ret
+  %res = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a)
+  ret float %res
+}
+
+define float @fminv_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: fminv_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fminnmv s0, v0.4s
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %op)
+  ret float %res
+}
+
+define double @fminv_v1f64(<1 x double> %a) #0 {
+; CHECK-LABEL: fminv_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fmin.v1f64(<1 x double> %a)
+  ret double %res
+}
+
+define double @fminv_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: fminv_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fminnmp d0, v0.2d
+; CHECK-NEXT:    ret
+  %res = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %a)
+  ret double %res
+}
+
+define double @fminv_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: fminv_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fminnmp d0, v0.2d
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %op)
+  ret double %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>)
+declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>)
+declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>)
+
+declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)
+declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)
+declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)
+
+declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>)
+declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)
+declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)
+
+declare half @llvm.vector.reduce.fmax.v4f16(<4 x half>)
+declare half @llvm.vector.reduce.fmax.v8f16(<8 x half>)
+declare half @llvm.vector.reduce.fmax.v16f16(<16 x half>)
+
+declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>)
+declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
+declare float @llvm.vector.reduce.fmax.v8f32(<8 x float>)
+
+declare double @llvm.vector.reduce.fmax.v1f64(<1 x double>)
+declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>)
+declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)
+
+declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>)
+declare half @llvm.vector.reduce.fmin.v8f16(<8 x half>)
+declare half @llvm.vector.reduce.fmin.v16f16(<16 x half>)
+
+declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)
+declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
+
+declare double @llvm.vector.reduce.fmin.v1f64(<1 x double>)
+declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>)
+declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>)

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
new file mode 100644
index 0000000000000..08c8af10b5c20
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
@@ -0,0 +1,739 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
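+;
+; Same setup as the fp-reduce file: baseline checks ahead of D138309. The
+; across-vector step still selects NEON (ADDV, SMAXV, UMINV, ...) except for
+; the 64-bit-element min/max cases, which NEON lacks and which already go
+; through SVE.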
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; UADDV
+;
+
+define i8 @uaddv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    add z0.b, z1.b, z0.b
+; CHECK-NEXT:    addv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @uaddv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: uaddv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @uaddv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: uaddv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: uaddv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    add z0.h, z1.h, z0.h
+; CHECK-NEXT:    addv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @uaddv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: uaddv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @uaddv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: uaddv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: uaddv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    add z0.s, z1.s, z0.s
+; CHECK-NEXT:    addv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i64 @uaddv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: uaddv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: uaddv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    addp d0, v0.2d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; SMAXV
+;
+
+define i8 @smaxv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: smaxv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @smaxv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: smaxv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @smaxv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: smaxv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    smax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    smaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @smaxv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: smaxv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @smaxv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: smaxv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @smaxv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: smaxv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    smaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @smaxv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: smaxv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @smaxv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: smaxv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @smaxv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: smaxv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    smaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+; No NEON 64-bit vector SMAXV support. Use SVE.
+define i64 @smaxv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: smaxv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    smaxv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @smaxv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: smaxv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    smaxv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; SMINV
+;
+
+define i8 @sminv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: sminv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @sminv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: sminv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @sminv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: sminv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    smin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    sminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @sminv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: sminv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @sminv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: sminv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @sminv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: sminv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    sminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @sminv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: sminv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @sminv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: sminv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @sminv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: sminv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    sminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+; No NEON 64-bit vector SMINV support. Use SVE.
+define i64 @sminv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: sminv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    sminv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @sminv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: sminv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    sminv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; UMAXV
+;
+
+define i8 @umaxv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: umaxv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @umaxv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: umaxv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @umaxv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: umaxv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    umax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    umaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @umaxv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: umaxv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @umaxv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: umaxv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @umaxv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: umaxv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    umaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @umaxv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: umaxv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @umaxv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: umaxv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @umaxv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: umaxv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    umaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+; No NEON 64-bit vector UMAXV support. Use SVE.
+define i64 @umaxv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: umaxv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    umaxv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @umaxv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: umaxv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    umaxv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+;
+; UMINV
+;
+
+define i8 @uminv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: uminv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @uminv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: uminv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @uminv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: uminv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    umin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    uminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i16 @uminv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: uminv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @uminv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: uminv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @uminv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: uminv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    uminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i32 @uminv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: uminv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @uminv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: uminv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @uminv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: uminv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    uminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+; No NEON 64-bit vector UMINV support. Use SVE.
+define i64 @uminv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: uminv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    uminv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @uminv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: uminv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    uminv d0, p0, z0.d
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)
+
+declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>)
+
+declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
+
+declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)
+
+declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)
