[llvm] 03fa1fe - [AArch64][SME]: Add precursory tests for D138440

Hassnaa Hamdi via llvm-commits <llvm-commits at lists.llvm.org>
Thu Nov 24 09:45:29 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-24T17:45:17Z
New Revision: 03fa1fedf1f424c04298cb79f0da4dc1f0933339

URL: https://github.com/llvm/llvm-project/commit/03fa1fedf1f424c04298cb79f0da4dc1f0933339
DIFF: https://github.com/llvm/llvm-project/commit/03fa1fedf1f424c04298cb79f0da4dc1f0933339.diff

LOG: [AArch64][SME]: Add precursory tests for D138440

Add testing files:
 - fcopysign.ll
 - fp-rounding.ll
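
Each new file carries its own RUN line, so a single test can also be exercised directly outside of lit. A minimal sketch, assuming a built llc and FileCheck on PATH and an llvm-project checkout as the working directory:

  # Mirror the RUN: line of the new fcopysign test: compile in
  # streaming-compatible SVE mode and verify the autogenerated CHECK lines.
  llc -force-streaming-compatible-sve \
      < llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll \
    | FileCheck llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll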

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll
new file mode 100644
index 000000000000..f1e2a2ce7442
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll
@@ -0,0 +1,403 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;============ f16
+
+define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x half>, ptr %bp
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x half>, ptr %bp
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v16f16_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q3, q4, [x1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
+; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %a = load <16 x half>, ptr %ap
+  %b = load <16 x half>, ptr %bp
+  %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b)
+  store <16 x half> %r, ptr %ap
+  ret void
+}
+
+;============ f32
+
+define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x float>, ptr %bp
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v8f32_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q3, q4, [x1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
+; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x float>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b)
+  store <8 x float> %r, ptr %ap
+  ret void
+}
+
+;============ f64
+
+define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q2, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    fneg z1.d, p0/m, z1.d
+; CHECK-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q3, q4, [x1]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    fneg z2.d, p0/m, z2.d
+; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
+; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v2f32
+
+define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    adrp x8, .LCPI8_0
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %tmp0 = fptrunc <2 x double> %b to <2 x float>
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v4f32
+
+; SplitVecOp #1
+define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn v1.2s, v1.2d
+; CHECK-NEXT:    ldr q2, [x0]
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    bit v0.16b, v2.16b, v3.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x float>
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v2f64
+
+define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr s0, [x1, #4]
+; CHECK-NEXT:    adrp x8, .LCPI10_0
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr s0, [x1]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT:    ldr q2, [sp]
+; CHECK-NEXT:    fneg z0.d, p0/m, z0.d
+; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load <2 x float>, ptr %bp
+  %tmp0 = fpext <2 x float> %b to <2 x double>
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f64
+
+; SplitVecRes mismatched
+define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f64_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldr s0, [x1, #12]
+; CHECK-NEXT:    adrp x8, .LCPI11_0
+; CHECK-NEXT:    ldp q2, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT:    ldr s0, [x1, #8]
+; CHECK-NEXT:    fneg z3.d, p0/m, z3.d
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldr s0, [x1, #4]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr s0, [x1]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldp q4, q0, [sp]
+; CHECK-NEXT:    bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT:    mov v1.16b, v3.16b
+; CHECK-NEXT:    bsl v1.16b, v2.16b, v4.16b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fpext <4 x float> %b to <4 x double>
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f16
+
+define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    mov z1.s, z0.s[3]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    mov z2.s, z0.s[2]
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    strh w8, [sp, #8]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    adrp x9, .LCPI12_0
+; CHECK-NEXT:    strh w10, [sp, #12]
+; CHECK-NEXT:    strh w8, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI12_0]
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fptrunc <4 x float> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp q1, q0, [x1]
+; CHECK-NEXT:    adrp x8, .LCPI13_0
+; CHECK-NEXT:    fcvt h3, d1
+; CHECK-NEXT:    mov z1.d, z1.d[1]
+; CHECK-NEXT:    fcvt h1, d1
+; CHECK-NEXT:    fcvt h2, d0
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    fcvt h0, d0
+; CHECK-NEXT:    ldr d4, [x0]
+; CHECK-NEXT:    str h3, [sp, #8]
+; CHECK-NEXT:    str h1, [sp, #10]
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI13_0]
+; CHECK-NEXT:    str h2, [sp, #12]
+; CHECK-NEXT:    str h0, [sp, #14]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    bit v0.8b, v4.8b, v1.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+;============ v8f16
+
+define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    mov z5.s, z0.s[2]
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-NEXT:    mov z6.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    mov z3.s, z1.s[2]
+; CHECK-NEXT:    mov z2.s, z1.s[3]
+; CHECK-NEXT:    mov z4.s, z1.s[1]
+; CHECK-NEXT:    mov z1.s, z0.s[3]
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    strh w8, [sp, #8]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    strh w9, [sp]
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    fmov w8, s5
+; CHECK-NEXT:    strh w10, [sp, #14]
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    strh w9, [sp, #10]
+; CHECK-NEXT:    fmov w9, s6
+; CHECK-NEXT:    strh w8, [sp, #4]
+; CHECK-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NEXT:    strh w10, [sp, #6]
+; CHECK-NEXT:    strh w9, [sp, #2]
+; CHECK-NEXT:    ldr q1, [sp]
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %tmp0 = fptrunc <8 x float> %b to <8 x half>
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0
+declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0
+declare <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) #0
+
+declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0
+declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0
+declare <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) #0
+
+declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0
+declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
new file mode 100644
index 000000000000..59ae6d0db876
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
@@ -0,0 +1,916 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; CEIL -> FRINTP
+;
+
+define <2 x half> @frintp_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frintp_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.ceil.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frintp_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frintp_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.ceil.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frintp_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frintp_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.ceil.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frintp_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frintp_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintp v0.8h, v0.8h
+; CHECK-NEXT:    frintp v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.ceil.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frintp_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frintp_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.ceil.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frintp_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frintp_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.ceil.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frintp_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frintp_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.ceil.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frintp_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frintp_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frintp d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.ceil.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frintp_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frintp_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.ceil.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frintp_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frintp_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    frintp v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.ceil.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; FLOOR -> FRINTM
+;
+
+define <2 x half> @frintm_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frintm_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.floor.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frintm_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frintm_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.floor.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frintm_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frintm_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.floor.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frintm_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frintm_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintm v0.8h, v0.8h
+; CHECK-NEXT:    frintm v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.floor.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frintm_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frintm_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.floor.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frintm_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frintm_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.floor.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frintm_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frintm_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.floor.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frintm_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frintm_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frintm d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.floor.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frintm_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frintm_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frintm_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frintm_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    frintm v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.floor.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; FNEARBYINT -> FRINTI
+;
+
+define <2 x half> @frinti_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frinti_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.nearbyint.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frinti_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frinti_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frinti_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frinti_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frinti_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frinti_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinti v0.8h, v0.8h
+; CHECK-NEXT:    frinti v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.nearbyint.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frinti_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frinti_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frinti_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frinti_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frinti_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frinti_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinti v0.4s, v0.4s
+; CHECK-NEXT:    frinti v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frinti_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frinti_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frinti d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frinti_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frinti_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinti v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frinti_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frinti_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinti v0.2d, v0.2d
+; CHECK-NEXT:    frinti v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; RINT -> FRINTX
+;
+
+define <2 x half> @frintx_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frintx_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.rint.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frintx_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frintx_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.rint.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frintx_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frintx_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.rint.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frintx_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frintx_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintx v0.8h, v0.8h
+; CHECK-NEXT:    frintx v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.rint.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frintx_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frintx_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.rint.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frintx_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frintx_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.rint.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frintx_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frintx_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.rint.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frintx_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frintx_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.rint.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frintx_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frintx_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frintx_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frintx_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.rint.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; ROUND -> FRINTA
+;
+
+define <2 x half> @frinta_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frinta_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.round.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frinta_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frinta_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.round.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frinta_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frinta_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.round.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frinta_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frinta_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinta v0.8h, v0.8h
+; CHECK-NEXT:    frinta v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.round.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frinta_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frinta_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.round.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frinta_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frinta_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.round.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frinta_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frinta_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.round.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frinta_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frinta_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frinta d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.round.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frinta_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frinta_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.round.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frinta_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frinta_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    frinta v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.round.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; ROUNDEVEN -> FRINTN
+;
+
+define <2 x half> @frintn_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frintn_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frintn_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frintn_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frintn_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frintn_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frintn_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frintn_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintn v0.8h, v0.8h
+; CHECK-NEXT:    frintn v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frintn_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frintn_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frintn_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frintn_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frintn_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frintn_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frintn_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frintn_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frintn_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frintn_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frintn_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frintn_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    frintn v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+;
+; TRUNC -> FRINTZ
+;
+
+define <2 x half> @frintz_v2f16(<2 x half> %op) #0 {
+; CHECK-LABEL: frintz_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <2 x half> @llvm.trunc.v2f16(<2 x half> %op)
+  ret <2 x half> %res
+}
+
+define <4 x half> @frintz_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: frintz_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4h, v0.4h
+; CHECK-NEXT:    ret
+  %res = call <4 x half> @llvm.trunc.v4f16(<4 x half> %op)
+  ret <4 x half> %res
+}
+
+define <8 x half> @frintz_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: frintz_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.8h, v0.8h
+; CHECK-NEXT:    ret
+  %res = call <8 x half> @llvm.trunc.v8f16(<8 x half> %op)
+  ret <8 x half> %res
+}
+
+define void @frintz_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: frintz_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintz v0.8h, v0.8h
+; CHECK-NEXT:    frintz v1.8h, v1.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %op)
+  store <16 x half> %res, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @frintz_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: frintz_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    ret
+  %res = call <2 x float> @llvm.trunc.v2f32(<2 x float> %op)
+  ret <2 x float> %res
+}
+
+define <4 x float> @frintz_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: frintz_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    ret
+  %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %op)
+  ret <4 x float> %res
+}
+
+define void @frintz_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: frintz_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %op)
+  store <8 x float> %res, <8 x float>* %a
+  ret void
+}
+
+define <1 x double> @frintz_v1f64(<1 x double> %op) #0 {
+; CHECK-LABEL: frintz_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    frintz d0, d0
+; CHECK-NEXT:    ret
+  %res = call <1 x double> @llvm.trunc.v1f64(<1 x double> %op)
+  ret <1 x double> %res
+}
+
+define <2 x double> @frintz_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: frintz_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    ret
+  %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %op)
+  ret <2 x double> %res
+}
+
+define void @frintz_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: frintz_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    frintz v1.2d, v1.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %op)
+  store <4 x double> %res, <4 x double>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare <2 x half> @llvm.ceil.v2f16(<2 x half>)
+declare <4 x half> @llvm.ceil.v4f16(<4 x half>)
+declare <8 x half> @llvm.ceil.v8f16(<8 x half>)
+declare <16 x half> @llvm.ceil.v16f16(<16 x half>)
+declare <32 x half> @llvm.ceil.v32f16(<32 x half>)
+declare <64 x half> @llvm.ceil.v64f16(<64 x half>)
+declare <128 x half> @llvm.ceil.v128f16(<128 x half>)
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>)
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
+declare <8 x float> @llvm.ceil.v8f32(<8 x float>)
+declare <16 x float> @llvm.ceil.v16f32(<16 x float>)
+declare <32 x float> @llvm.ceil.v32f32(<32 x float>)
+declare <64 x float> @llvm.ceil.v64f32(<64 x float>)
+declare <1 x double> @llvm.ceil.v1f64(<1 x double>)
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
+declare <4 x double> @llvm.ceil.v4f64(<4 x double>)
+declare <8 x double> @llvm.ceil.v8f64(<8 x double>)
+declare <16 x double> @llvm.ceil.v16f64(<16 x double>)
+declare <32 x double> @llvm.ceil.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.floor.v2f16(<2 x half>)
+declare <4 x half> @llvm.floor.v4f16(<4 x half>)
+declare <8 x half> @llvm.floor.v8f16(<8 x half>)
+declare <16 x half> @llvm.floor.v16f16(<16 x half>)
+declare <32 x half> @llvm.floor.v32f16(<32 x half>)
+declare <64 x half> @llvm.floor.v64f16(<64 x half>)
+declare <128 x half> @llvm.floor.v128f16(<128 x half>)
+declare <2 x float> @llvm.floor.v2f32(<2 x float>)
+declare <4 x float> @llvm.floor.v4f32(<4 x float>)
+declare <8 x float> @llvm.floor.v8f32(<8 x float>)
+declare <16 x float> @llvm.floor.v16f32(<16 x float>)
+declare <32 x float> @llvm.floor.v32f32(<32 x float>)
+declare <64 x float> @llvm.floor.v64f32(<64 x float>)
+declare <1 x double> @llvm.floor.v1f64(<1 x double>)
+declare <2 x double> @llvm.floor.v2f64(<2 x double>)
+declare <4 x double> @llvm.floor.v4f64(<4 x double>)
+declare <8 x double> @llvm.floor.v8f64(<8 x double>)
+declare <16 x double> @llvm.floor.v16f64(<16 x double>)
+declare <32 x double> @llvm.floor.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.nearbyint.v2f16(<2 x half>)
+declare <4 x half> @llvm.nearbyint.v4f16(<4 x half>)
+declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
+declare <16 x half> @llvm.nearbyint.v16f16(<16 x half>)
+declare <32 x half> @llvm.nearbyint.v32f16(<32 x half>)
+declare <64 x half> @llvm.nearbyint.v64f16(<64 x half>)
+declare <128 x half> @llvm.nearbyint.v128f16(<128 x half>)
+declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>)
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
+declare <8 x float> @llvm.nearbyint.v8f32(<8 x float>)
+declare <16 x float> @llvm.nearbyint.v16f32(<16 x float>)
+declare <32 x float> @llvm.nearbyint.v32f32(<32 x float>)
+declare <64 x float> @llvm.nearbyint.v64f32(<64 x float>)
+declare <1 x double> @llvm.nearbyint.v1f64(<1 x double>)
+declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
+declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>)
+declare <8 x double> @llvm.nearbyint.v8f64(<8 x double>)
+declare <16 x double> @llvm.nearbyint.v16f64(<16 x double>)
+declare <32 x double> @llvm.nearbyint.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.rint.v2f16(<2 x half>)
+declare <4 x half> @llvm.rint.v4f16(<4 x half>)
+declare <8 x half> @llvm.rint.v8f16(<8 x half>)
+declare <16 x half> @llvm.rint.v16f16(<16 x half>)
+declare <32 x half> @llvm.rint.v32f16(<32 x half>)
+declare <64 x half> @llvm.rint.v64f16(<64 x half>)
+declare <128 x half> @llvm.rint.v128f16(<128 x half>)
+declare <2 x float> @llvm.rint.v2f32(<2 x float>)
+declare <4 x float> @llvm.rint.v4f32(<4 x float>)
+declare <8 x float> @llvm.rint.v8f32(<8 x float>)
+declare <16 x float> @llvm.rint.v16f32(<16 x float>)
+declare <32 x float> @llvm.rint.v32f32(<32 x float>)
+declare <64 x float> @llvm.rint.v64f32(<64 x float>)
+declare <1 x double> @llvm.rint.v1f64(<1 x double>)
+declare <2 x double> @llvm.rint.v2f64(<2 x double>)
+declare <4 x double> @llvm.rint.v4f64(<4 x double>)
+declare <8 x double> @llvm.rint.v8f64(<8 x double>)
+declare <16 x double> @llvm.rint.v16f64(<16 x double>)
+declare <32 x double> @llvm.rint.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.round.v2f16(<2 x half>)
+declare <4 x half> @llvm.round.v4f16(<4 x half>)
+declare <8 x half> @llvm.round.v8f16(<8 x half>)
+declare <16 x half> @llvm.round.v16f16(<16 x half>)
+declare <32 x half> @llvm.round.v32f16(<32 x half>)
+declare <64 x half> @llvm.round.v64f16(<64 x half>)
+declare <128 x half> @llvm.round.v128f16(<128 x half>)
+declare <2 x float> @llvm.round.v2f32(<2 x float>)
+declare <4 x float> @llvm.round.v4f32(<4 x float>)
+declare <8 x float> @llvm.round.v8f32(<8 x float>)
+declare <16 x float> @llvm.round.v16f32(<16 x float>)
+declare <32 x float> @llvm.round.v32f32(<32 x float>)
+declare <64 x float> @llvm.round.v64f32(<64 x float>)
+declare <1 x double> @llvm.round.v1f64(<1 x double>)
+declare <2 x double> @llvm.round.v2f64(<2 x double>)
+declare <4 x double> @llvm.round.v4f64(<4 x double>)
+declare <8 x double> @llvm.round.v8f64(<8 x double>)
+declare <16 x double> @llvm.round.v16f64(<16 x double>)
+declare <32 x double> @llvm.round.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.roundeven.v2f16(<2 x half>)
+declare <4 x half> @llvm.roundeven.v4f16(<4 x half>)
+declare <8 x half> @llvm.roundeven.v8f16(<8 x half>)
+declare <16 x half> @llvm.roundeven.v16f16(<16 x half>)
+declare <32 x half> @llvm.roundeven.v32f16(<32 x half>)
+declare <64 x half> @llvm.roundeven.v64f16(<64 x half>)
+declare <128 x half> @llvm.roundeven.v128f16(<128 x half>)
+declare <2 x float> @llvm.roundeven.v2f32(<2 x float>)
+declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
+declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
+declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
+declare <32 x float> @llvm.roundeven.v32f32(<32 x float>)
+declare <64 x float> @llvm.roundeven.v64f32(<64 x float>)
+declare <1 x double> @llvm.roundeven.v1f64(<1 x double>)
+declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
+declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
+declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
+declare <16 x double> @llvm.roundeven.v16f64(<16 x double>)
+declare <32 x double> @llvm.roundeven.v32f64(<32 x double>)
+
+declare <2 x half> @llvm.trunc.v2f16(<2 x half>)
+declare <4 x half> @llvm.trunc.v4f16(<4 x half>)
+declare <8 x half> @llvm.trunc.v8f16(<8 x half>)
+declare <16 x half> @llvm.trunc.v16f16(<16 x half>)
+declare <32 x half> @llvm.trunc.v32f16(<32 x half>)
+declare <64 x half> @llvm.trunc.v64f16(<64 x half>)
+declare <128 x half> @llvm.trunc.v128f16(<128 x half>)
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>)
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
+declare <8 x float> @llvm.trunc.v8f32(<8 x float>)
+declare <16 x float> @llvm.trunc.v16f32(<16 x float>)
+declare <32 x float> @llvm.trunc.v32f32(<32 x float>)
+declare <64 x float> @llvm.trunc.v64f32(<64 x float>)
+declare <1 x double> @llvm.trunc.v1f64(<1 x double>)
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
+declare <4 x double> @llvm.trunc.v4f64(<4 x double>)
+declare <8 x double> @llvm.trunc.v8f64(<8 x double>)
+declare <16 x double> @llvm.trunc.v16f64(<16 x double>)
+declare <32 x double> @llvm.trunc.v32f64(<32 x double>)
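
Since the assertions in both files are autogenerated (see the NOTE lines above), they can be refreshed mechanically once D138440 or a later change alters the produced code, rather than edited by hand. A hedged sketch, assuming an llvm-project checkout with llc built under build/bin (the build path is an assumption):

  # Regenerate the autogenerated CHECK lines for both new tests.
  # --llc-binary points at the locally built llc; adjust the path as needed.
  python3 llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll \
      llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll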


        

