[llvm] f809f97 - [AArch64][SME]: Generate streaming-compatible code for FP rounding operations.

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 24 09:45:31 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-24T17:45:17Z
New Revision: f809f97a19dde4be0ee7bf5f5042bf45bbee363f

URL: https://github.com/llvm/llvm-project/commit/f809f97a19dde4be0ee7bf5f5042bf45bbee363f
DIFF: https://github.com/llvm/llvm-project/commit/f809f97a19dde4be0ee7bf5f5042bf45bbee363f.diff

LOG: [AArch64][SME]: Generate streaming-compatible code for FP rounding operations.

To generate code compatible with streaming mode:
 - enable custom lowering for ISD::FCOPYSIGN, ISD::FP_ROUND, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
   ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC.

Differential Revision: https://reviews.llvm.org/D138440

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll

Removed: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index bdb0fa39dcf6..eb1262d0be3b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1649,6 +1649,14 @@ void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
   setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
   setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
+  setOperationAction(ISD::FP_ROUND, VT, Custom);
+  setOperationAction(ISD::FCEIL, VT, Custom);
+  setOperationAction(ISD::FFLOOR, VT, Custom);
+  setOperationAction(ISD::FNEARBYINT, VT, Custom);
+  setOperationAction(ISD::FRINT, VT, Custom);
+  setOperationAction(ISD::FROUND, VT, Custom);
+  setOperationAction(ISD::FROUNDEVEN, VT, Custom);
+  setOperationAction(ISD::FTRUNC, VT, Custom);
 }
 
 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
@@ -8324,7 +8332,8 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
     IntVT =
         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
 
-  if (VT.isFixedLengthVector() && useSVEForFixedLengthVectorVT(VT)) {
+  if (VT.isFixedLengthVector() &&
+    useSVEForFixedLengthVectorVT(VT, Subtarget->forceStreamingCompatibleSVE())) {
     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
 
     In1 = convertToScalableVector(DAG, ContainerVT, In1);

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
similarity index 73%
rename from llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll
rename to llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index f1e2a2ce7442..5c60abd3b8c6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -10,11 +10,11 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f16_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, ptr %ap
@@ -27,11 +27,11 @@ define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v8f16_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI1_0
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, ptr %ap
@@ -44,12 +44,14 @@ define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v16f16_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ldp q3, q4, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_0]
-; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
-; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    and z0.h, z0.h, #0x8000
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    and z2.h, z2.h, #0x7fff
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    and z3.h, z3.h, #0x7fff
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %a = load <16 x half>, ptr %ap
@@ -64,11 +66,11 @@ define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI3_0
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, ptr %ap
@@ -81,11 +83,11 @@ define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI4_0
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
@@ -98,12 +100,14 @@ define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v8f32_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI5_0
-; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ldp q3, q4, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI5_0]
-; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
-; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    and z0.s, z0.s, #0x80000000
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    and z2.s, z2.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    and z3.s, z3.s, #0x7fffffff
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %ap
@@ -118,13 +122,11 @@ define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI6_0
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
-; CHECK-NEXT:    fneg z1.d, p0/m, z1.d
-; CHECK-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, ptr %ap
@@ -137,14 +139,14 @@ define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f64_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI7_0
-; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ldp q3, q4, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI7_0]
-; CHECK-NEXT:    fneg z2.d, p0/m, z2.d
-; CHECK-NEXT:    bif v0.16b, v3.16b, v2.16b
-; CHECK-NEXT:    bif v1.16b, v4.16b, v2.16b
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    and z2.d, z2.d, #0x7fffffffffffffff
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    and z3.d, z3.d, #0x7fffffffffffffff
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %ap
@@ -159,13 +161,22 @@ define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f64:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x1]
-; CHECK-NEXT:    adrp x8, .LCPI8_0
-; CHECK-NEXT:    ldr d1, [x0]
-; CHECK-NEXT:    fcvtn v0.2s, v0.2d
-; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI8_0]
-; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    mov z1.d, z0.d[1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    stp w8, w9, [sp, #8]
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    ldr d1, [sp, #8]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, ptr %ap
   %b = load <2 x double>, ptr %bp
@@ -181,16 +192,27 @@ define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x1]
-; CHECK-NEXT:    adrp x8, .LCPI9_0
-; CHECK-NEXT:    ptrue p0.s, vl2
-; CHECK-NEXT:    fcvtn v0.2s, v0.2d
-; CHECK-NEXT:    fcvtn v1.2s, v1.2d
-; CHECK-NEXT:    ldr q2, [x0]
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI9_0]
-; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
-; CHECK-NEXT:    bit v0.16b, v2.16b, v3.16b
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp q1, q0, [x1]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    mov z2.d, z0.d[1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    fmov x9, d2
+; CHECK-NEXT:    mov z2.d, z1.d[1]
+; CHECK-NEXT:    fmov x11, d2
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    stp w8, w9, [sp, #8]
+; CHECK-NEXT:    stp w10, w11, [sp]
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    ldr q1, [sp]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
   %b = load <4 x double>, ptr %bp
@@ -208,18 +230,16 @@ define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr s0, [x1, #4]
-; CHECK-NEXT:    adrp x8, .LCPI10_0
 ; CHECK-NEXT:    ldr q1, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    str d0, [sp, #8]
 ; CHECK-NEXT:    ldr s0, [x1]
 ; CHECK-NEXT:    fcvt d0, s0
 ; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
-; CHECK-NEXT:    ldr q2, [sp]
-; CHECK-NEXT:    fneg z0.d, p0/m, z0.d
-; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
@@ -240,14 +260,12 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldr s0, [x1, #12]
-; CHECK-NEXT:    adrp x8, .LCPI11_0
 ; CHECK-NEXT:    ldp q2, q1, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    and z2.d, z2.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    ldr s0, [x1, #8]
-; CHECK-NEXT:    fneg z3.d, p0/m, z3.d
 ; CHECK-NEXT:    fcvt d0, s0
 ; CHECK-NEXT:    str d0, [sp, #16]
 ; CHECK-NEXT:    ldr s0, [x1, #4]
@@ -256,10 +274,11 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    ldr s0, [x1]
 ; CHECK-NEXT:    fcvt d0, s0
 ; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldp q4, q0, [sp]
-; CHECK-NEXT:    bit v0.16b, v1.16b, v3.16b
-; CHECK-NEXT:    mov v1.16b, v3.16b
-; CHECK-NEXT:    bsl v1.16b, v2.16b, v4.16b
+; CHECK-NEXT:    ldp q3, q0, [sp]
+; CHECK-NEXT:    and z3.d, z3.d, #0x8000000000000000
+; CHECK-NEXT:    and z0.d, z0.d, #0x8000000000000000
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orr z1.d, z2.d, z3.d
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
@@ -281,22 +300,22 @@ define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    mov z1.s, z0.s[3]
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    mov z1.s, z0.s[3]
 ; CHECK-NEXT:    mov z2.s, z0.s[2]
 ; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w9, s1
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    fmov w10, s2
 ; CHECK-NEXT:    strh w8, [sp, #8]
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    adrp x9, .LCPI12_0
+; CHECK-NEXT:    and z1.h, z1.h, #0x7fff
 ; CHECK-NEXT:    strh w10, [sp, #12]
 ; CHECK-NEXT:    strh w8, [sp, #10]
 ; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI12_0]
-; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    and z0.h, z0.h, #0x8000
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
@@ -314,7 +333,6 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x1]
-; CHECK-NEXT:    adrp x8, .LCPI13_0
 ; CHECK-NEXT:    fcvt h3, d1
 ; CHECK-NEXT:    mov z1.d, z1.d[1]
 ; CHECK-NEXT:    fcvt h1, d1
@@ -324,11 +342,12 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    ldr d4, [x0]
 ; CHECK-NEXT:    str h3, [sp, #8]
 ; CHECK-NEXT:    str h1, [sp, #10]
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI13_0]
 ; CHECK-NEXT:    str h2, [sp, #12]
+; CHECK-NEXT:    and z4.h, z4.h, #0x7fff
 ; CHECK-NEXT:    str h0, [sp, #14]
 ; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    bit v0.8b, v4.8b, v1.8b
+; CHECK-NEXT:    and z0.h, z0.h, #0x8000
+; CHECK-NEXT:    orr z0.d, z4.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
@@ -355,29 +374,29 @@ define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
 ; CHECK-NEXT:    mov z6.s, z0.s[1]
 ; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z3.s, z1.s[2]
 ; CHECK-NEXT:    mov z2.s, z1.s[3]
+; CHECK-NEXT:    mov z3.s, z1.s[2]
 ; CHECK-NEXT:    mov z4.s, z1.s[1]
 ; CHECK-NEXT:    mov z1.s, z0.s[3]
+; CHECK-NEXT:    fmov w10, s2
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    strh w8, [sp, #8]
 ; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    fmov w10, s2
 ; CHECK-NEXT:    strh w9, [sp]
 ; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
 ; CHECK-NEXT:    strh w10, [sp, #14]
 ; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    fmov w8, s5
 ; CHECK-NEXT:    strh w9, [sp, #10]
 ; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    adrp x8, .LCPI14_0
 ; CHECK-NEXT:    strh w10, [sp, #6]
+; CHECK-NEXT:    strh w8, [sp, #4]
 ; CHECK-NEXT:    strh w9, [sp, #2]
 ; CHECK-NEXT:    ldr q1, [sp]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI14_0]
-; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
index 59ae6d0db876..7264e776e0f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
@@ -10,7 +10,10 @@ target triple = "aarch64-unknown-linux-gnu"
 define <2 x half> @frintp_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frintp_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintp z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.ceil.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -19,7 +22,10 @@ define <2 x half> @frintp_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frintp_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frintp_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintp z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.ceil.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -28,7 +34,10 @@ define <4 x half> @frintp_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frintp_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frintp_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintp z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.ceil.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -38,8 +47,9 @@ define void @frintp_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frintp_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintp v0.8h, v0.8h
-; CHECK-NEXT:    frintp v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintp z0.h, p0/m, z0.h
+; CHECK-NEXT:    frintp z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -51,7 +61,10 @@ define void @frintp_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frintp_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frintp_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frintp z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.ceil.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -60,7 +73,10 @@ define <2 x float> @frintp_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frintp_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frintp_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintp z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.ceil.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -70,8 +86,9 @@ define void @frintp_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frintp_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintp v0.4s, v0.4s
-; CHECK-NEXT:    frintp v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintp z0.s, p0/m, z0.s
+; CHECK-NEXT:    frintp z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -93,7 +110,10 @@ define <1 x double> @frintp_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frintp_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frintp_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintp v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintp z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.ceil.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -103,8 +123,9 @@ define void @frintp_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frintp_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintp v0.2d, v0.2d
-; CHECK-NEXT:    frintp v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintp z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintp z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -120,7 +141,10 @@ define void @frintp_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frintm_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frintm_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintm z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.floor.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -129,7 +153,10 @@ define <2 x half> @frintm_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frintm_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frintm_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintm z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.floor.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -138,7 +165,10 @@ define <4 x half> @frintm_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frintm_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frintm_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintm z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.floor.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -148,8 +178,9 @@ define void @frintm_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frintm_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintm v0.8h, v0.8h
-; CHECK-NEXT:    frintm v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintm z0.h, p0/m, z0.h
+; CHECK-NEXT:    frintm z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -161,7 +192,10 @@ define void @frintm_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frintm_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frintm_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frintm z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.floor.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -170,7 +204,10 @@ define <2 x float> @frintm_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frintm_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frintm_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintm z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.floor.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -180,8 +217,9 @@ define void @frintm_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frintm_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintm v0.4s, v0.4s
-; CHECK-NEXT:    frintm v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintm z0.s, p0/m, z0.s
+; CHECK-NEXT:    frintm z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -203,7 +241,10 @@ define <1 x double> @frintm_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frintm_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frintm_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintm v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintm z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.floor.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -213,8 +254,9 @@ define void @frintm_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frintm_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintm v0.2d, v0.2d
-; CHECK-NEXT:    frintm v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintm z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintm z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -230,7 +272,10 @@ define void @frintm_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frinti_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frinti_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frinti z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.nearbyint.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -239,7 +284,10 @@ define <2 x half> @frinti_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frinti_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frinti_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frinti z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -248,7 +296,10 @@ define <4 x half> @frinti_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frinti_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frinti_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frinti z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -258,8 +309,9 @@ define void @frinti_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frinti_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinti v0.8h, v0.8h
-; CHECK-NEXT:    frinti v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frinti z0.h, p0/m, z0.h
+; CHECK-NEXT:    frinti z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -271,7 +323,10 @@ define void @frinti_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frinti_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frinti_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frinti z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -280,7 +335,10 @@ define <2 x float> @frinti_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frinti_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frinti_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frinti z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -290,8 +348,9 @@ define void @frinti_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frinti_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinti v0.4s, v0.4s
-; CHECK-NEXT:    frinti v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frinti z0.s, p0/m, z0.s
+; CHECK-NEXT:    frinti z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -313,7 +372,10 @@ define <1 x double> @frinti_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frinti_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frinti_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinti v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frinti z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -323,8 +385,9 @@ define void @frinti_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frinti_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinti v0.2d, v0.2d
-; CHECK-NEXT:    frinti v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frinti z0.d, p0/m, z0.d
+; CHECK-NEXT:    frinti z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -340,7 +403,10 @@ define void @frinti_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frintx_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frintx_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.rint.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -349,7 +415,10 @@ define <2 x half> @frintx_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frintx_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frintx_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.rint.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -358,7 +427,10 @@ define <4 x half> @frintx_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frintx_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frintx_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.rint.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -368,8 +440,9 @@ define void @frintx_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frintx_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintx v0.8h, v0.8h
-; CHECK-NEXT:    frintx v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
+; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -381,7 +454,10 @@ define void @frintx_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frintx_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frintx_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.rint.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -390,7 +466,10 @@ define <2 x float> @frintx_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frintx_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frintx_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.rint.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -400,8 +479,9 @@ define void @frintx_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frintx_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintx v0.4s, v0.4s
-; CHECK-NEXT:    frintx v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
+; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -423,7 +503,10 @@ define <1 x double> @frintx_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frintx_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frintx_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintx v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.rint.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -433,8 +516,9 @@ define void @frintx_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frintx_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintx v0.2d, v0.2d
-; CHECK-NEXT:    frintx v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -450,7 +534,10 @@ define void @frintx_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frinta_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frinta_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frinta z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.round.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -459,7 +546,10 @@ define <2 x half> @frinta_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frinta_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frinta_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frinta z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.round.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -468,7 +558,10 @@ define <4 x half> @frinta_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frinta_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frinta_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frinta z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.round.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -478,8 +571,9 @@ define void @frinta_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frinta_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinta v0.8h, v0.8h
-; CHECK-NEXT:    frinta v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frinta z0.h, p0/m, z0.h
+; CHECK-NEXT:    frinta z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -491,7 +585,10 @@ define void @frinta_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frinta_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frinta_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frinta z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.round.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -500,7 +597,10 @@ define <2 x float> @frinta_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frinta_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frinta_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frinta z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.round.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -510,8 +610,9 @@ define void @frinta_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frinta_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinta v0.4s, v0.4s
-; CHECK-NEXT:    frinta v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frinta z0.s, p0/m, z0.s
+; CHECK-NEXT:    frinta z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -533,7 +634,10 @@ define <1 x double> @frinta_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frinta_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frinta_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frinta v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frinta z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.round.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -543,8 +647,9 @@ define void @frinta_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frinta_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frinta v0.2d, v0.2d
-; CHECK-NEXT:    frinta v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frinta z0.d, p0/m, z0.d
+; CHECK-NEXT:    frinta z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -560,7 +665,10 @@ define void @frinta_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frintn_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frintn_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintn z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.roundeven.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -569,7 +677,10 @@ define <2 x half> @frintn_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frintn_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frintn_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintn z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -578,7 +689,10 @@ define <4 x half> @frintn_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frintn_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frintn_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintn z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.roundeven.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -588,8 +702,9 @@ define void @frintn_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frintn_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintn v0.8h, v0.8h
-; CHECK-NEXT:    frintn v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintn z0.h, p0/m, z0.h
+; CHECK-NEXT:    frintn z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -601,7 +716,10 @@ define void @frintn_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frintn_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frintn_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frintn z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -610,7 +728,10 @@ define <2 x float> @frintn_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frintn_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frintn_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintn z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -620,8 +741,9 @@ define void @frintn_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frintn_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintn v0.4s, v0.4s
-; CHECK-NEXT:    frintn v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintn z0.s, p0/m, z0.s
+; CHECK-NEXT:    frintn z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -643,7 +765,10 @@ define <1 x double> @frintn_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frintn_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frintn_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintn v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintn z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -653,8 +778,9 @@ define void @frintn_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frintn_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintn v0.2d, v0.2d
-; CHECK-NEXT:    frintn v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintn z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintn z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a
@@ -670,7 +796,10 @@ define void @frintn_v4f64(<4 x double>* %a) #0 {
 define <2 x half> @frintz_v2f16(<2 x half> %op) #0 {
 ; CHECK-LABEL: frintz_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintz z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x half> @llvm.trunc.v2f16(<2 x half> %op)
   ret <2 x half> %res
@@ -679,7 +808,10 @@ define <2 x half> @frintz_v2f16(<2 x half> %op) #0 {
 define <4 x half> @frintz_v4f16(<4 x half> %op) #0 {
 ; CHECK-LABEL: frintz_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.4h, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    frintz z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x half> @llvm.trunc.v4f16(<4 x half> %op)
   ret <4 x half> %res
@@ -688,7 +820,10 @@ define <4 x half> @frintz_v4f16(<4 x half> %op) #0 {
 define <8 x half> @frintz_v8f16(<8 x half> %op) #0 {
 ; CHECK-LABEL: frintz_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.8h, v0.8h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintz z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <8 x half> @llvm.trunc.v8f16(<8 x half> %op)
   ret <8 x half> %res
@@ -698,8 +833,9 @@ define void @frintz_v16f16(<16 x half>* %a) #0 {
 ; CHECK-LABEL: frintz_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintz v0.8h, v0.8h
-; CHECK-NEXT:    frintz v1.8h, v1.8h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    frintz z0.h, p0/m, z0.h
+; CHECK-NEXT:    frintz z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <16 x half>, <16 x half>* %a
@@ -711,7 +847,10 @@ define void @frintz_v16f16(<16 x half>* %a) #0 {
 define <2 x float> @frintz_v2f32(<2 x float> %op) #0 {
 ; CHECK-LABEL: frintz_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2s, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    frintz z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x float> @llvm.trunc.v2f32(<2 x float> %op)
   ret <2 x float> %res
@@ -720,7 +859,10 @@ define <2 x float> @frintz_v2f32(<2 x float> %op) #0 {
 define <4 x float> @frintz_v4f32(<4 x float> %op) #0 {
 ; CHECK-LABEL: frintz_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.4s, v0.4s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintz z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <4 x float> @llvm.trunc.v4f32(<4 x float> %op)
   ret <4 x float> %res
@@ -730,8 +872,9 @@ define void @frintz_v8f32(<8 x float>* %a) #0 {
 ; CHECK-LABEL: frintz_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintz v0.4s, v0.4s
-; CHECK-NEXT:    frintz v1.4s, v1.4s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    frintz z0.s, p0/m, z0.s
+; CHECK-NEXT:    frintz z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <8 x float>, <8 x float>* %a
@@ -753,7 +896,10 @@ define <1 x double> @frintz_v1f64(<1 x double> %op) #0 {
 define <2 x double> @frintz_v2f64(<2 x double> %op) #0 {
 ; CHECK-LABEL: frintz_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    frintz v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintz z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = call <2 x double> @llvm.trunc.v2f64(<2 x double> %op)
   ret <2 x double> %res
@@ -763,8 +909,9 @@ define void @frintz_v4f64(<4 x double>* %a) #0 {
 ; CHECK-LABEL: frintz_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    frintz v0.2d, v0.2d
-; CHECK-NEXT:    frintz v1.2d, v1.2d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    frintz z0.d, p0/m, z0.d
+; CHECK-NEXT:    frintz z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op = load <4 x double>, <4 x double>* %a


        


More information about the llvm-commits mailing list