[llvm] 3ba161b - [NFC][LLVM][CodeGen][SVE] Add merging unary operation tests.
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 3 07:47:00 PDT 2025
Author: Paul Walker
Date: 2025-07-03T14:46:17Z
New Revision: 3ba161bb753000400caeff6e1ca6b8a860d0dacc
URL: https://github.com/llvm/llvm-project/commit/3ba161bb753000400caeff6e1ca6b8a860d0dacc
DIFF: https://github.com/llvm/llvm-project/commit/3ba161bb753000400caeff6e1ca6b8a860d0dacc.diff
LOG: [NFC][LLVM][CodeGen][SVE] Add merging unary operation tests.
Added:
llvm/test/CodeGen/AArch64/sve-merging-unary.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-merging-unary.ll b/llvm/test/CodeGen/AArch64/sve-merging-unary.ll
new file mode 100644
index 0000000000000..9e331a69bcf7c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-merging-unary.ll
@@ -0,0 +1,1748 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mattr=+sve,+bf16 < %s | FileCheck %s
+; RUN: llc -mattr=+sme2 -force-streaming < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
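+; Each function applies an unpredicated unary operation to %b and then uses
+; %pg to select between the result and %a. The operation is currently
+; performed under an all-true predicate, with the select lowered to a
+; merging MOV; these tests pin down that baseline.
+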
+define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: abs_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: abs z1.b, p1/m, z1.b
+; CHECK-NEXT: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %b, i1 0)
+ %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @abs_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: abs_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: abs z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %b, i1 0)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @abs_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: abs_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: abs z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %b, i1 0)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @abs_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: abs_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: abs z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %b, i1 0)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
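+; llvm.ctlz lowers to a merging CLZ.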
+define <vscale x 16 x i8> @clz_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: clz_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: clz z1.b, p1/m, z1.b
+; CHECK-NEXT: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> %b)
+ %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @clz_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: clz_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: clz z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @clz_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: clz_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: clz z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @clz_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: clz_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: clz z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
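+; llvm.ctpop lowers to a merging CNT.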
+define <vscale x 16 x i8> @cnt_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: cnt_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: cnt z1.b, p1/m, z1.b
+; CHECK-NEXT: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8> %b)
+ %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @cnt_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: cnt_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: cnt z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @cnt_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: cnt_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: cnt z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @cnt_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: cnt_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: cnt z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
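+; llvm.fabs lowers to a merging FABS; bfloat has no FABS, so the sign bit is
+; cleared with an unpredicated AND instead.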
+define <vscale x 2 x half> @fabs_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fabs_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fabs z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @fabs_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fabs_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fabs z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @fabs_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fabs_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: fabs z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @fabs_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fabs_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fabs z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @fabs_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fabs_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fabs z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @fabs_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fabs_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fabs z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 2 x bfloat> @fabs_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: fabs_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x7fff
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
+ ret <vscale x 2 x bfloat> %res
+}
+
+define <vscale x 4 x bfloat> @fabs_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: fabs_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x7fff
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
+ ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 8 x bfloat> @fabs_nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; CHECK-LABEL: fabs_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x7fff
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %b.op, <vscale x 8 x bfloat> %a
+ ret <vscale x 8 x bfloat> %res
+}
+
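+; fpext/fptrunc lower to merging FCVT (BFCVT towards bfloat); bfloat extends
+; are an unpredicated left shift by 16, and f64->bf16 additionally requires
+; the SVE2 FCVTX.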
+define <vscale x 2 x float> @fcvt_nxv2f16_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fcvt_nxv2f16_to_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.s, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 2 x half> %b to <vscale x 2 x float>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x double> @fcvt_nxv2f16_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fcvt_nxv2f16_to_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.d, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 2 x half> %b to <vscale x 2 x double>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 4 x float> @fcvt_nxv4f16_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fcvt_nxv4f16_to_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvt z1.s, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 4 x half> %b to <vscale x 4 x float>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x half> @fcvt_nxv2f32_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fcvt_nxv2f32_to_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 2 x float> %b to <vscale x 2 x half>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 2 x double> @fcvt_nxv2f32_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fcvt_nxv2f32_to_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.d, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 2 x float> %b to <vscale x 2 x double>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 2 x bfloat> @fcvt_nxv2f32_to_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fcvt_nxv2f32_to_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: bfcvt z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 2 x float> %b to <vscale x 2 x bfloat>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
+ ret <vscale x 2 x bfloat> %res
+}
+
+define <vscale x 4 x half> @fcvt_nxv4f32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fcvt_nxv4f32_to_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvt z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 4 x float> %b to <vscale x 4 x half>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 4 x bfloat> @fcvt_nxv4f32_to_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fcvt_nxv4f32_to_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: bfcvt z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 4 x float> %b to <vscale x 4 x bfloat>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
+ ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 2 x half> @fcvt_nxv2f64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fcvt_nxv2f64_to_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.h, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x half>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 2 x float> @fcvt_nxv2f64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fcvt_nxv2f64_to_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.s, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x float>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x bfloat> @fcvt_nxv2f64_to_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x double> %b) "target-features"="+sve2" {
+; CHECK-LABEL: fcvt_nxv2f64_to_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtx z1.s, p1/m, z1.d
+; CHECK-NEXT: bfcvt z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x bfloat>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
+ ret <vscale x 2 x bfloat> %res
+}
+
+define <vscale x 2 x float> @fcvt_nxv2bf16_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: fcvt_nxv2bf16_to_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x float>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x double> @fcvt_nxv2bf16_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: fcvt_nxv2bf16_to_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvt z1.d, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x double>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 4 x float> @fcvt_nxv4bf16_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: fcvt_nxv4bf16_to_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fpext <vscale x 4 x bfloat> %b to <vscale x 4 x float>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
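+; fptosi lowers to a merging FCVTZS.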
+define <vscale x 2 x i64> @fcvtzs_nxv2f16_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fcvtzs_nxv2f16_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzs z1.d, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 2 x half> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @fcvtzs_nxv4f16_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fcvtzs_nxv4f16_to_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvtzs z1.s, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 4 x half> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i16> @fcvtzs_nxv8f16_to_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fcvtzs_nxv8f16_to_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: fcvtzs z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 8 x half> %b to <vscale x 8 x i16>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 2 x i64> @fcvtzs_nxv2f32_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fcvtzs_nxv2f32_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzs z1.d, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 2 x float> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @fcvtzs_nxv4f32_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fcvtzs_nxv4f32_to_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvtzs z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 4 x float> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @fcvtzs_nxv2f64_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fcvtzs_nxv2f64_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzs z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptosi <vscale x 2 x double> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
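+; fptoui lowers to a merging FCVTZU.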
+define <vscale x 2 x i64> @fcvtzu_nxv2f16_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fcvtzu_nxv2f16_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzu z1.d, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 2 x half> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @fcvtzu_nxv4f16_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fcvtzu_nxv4f16_to_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvtzu z1.s, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 4 x half> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i16> @fcvtzu_nxv8f16_to_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fcvtzu_nxv8f16_to_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: fcvtzu z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 8 x half> %b to <vscale x 8 x i16>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 2 x i64> @fcvtzu_nxv2f32_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fcvtzu_nxv2f32_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzu z1.d, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 2 x float> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @fcvtzu_nxv4f32_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fcvtzu_nxv4f32_to_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fcvtzu z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 4 x float> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @fcvtzu_nxv2f64_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fcvtzu_nxv2f64_to_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fcvtzu z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fptoui <vscale x 2 x double> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
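+; fneg lowers to a merging FNEG; bfloat has no FNEG, so the sign bit is
+; flipped with an unpredicated EOR instead.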
+define <vscale x 2 x half> @fneg_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fneg_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fneg z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 2 x half> %b
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @fneg_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fneg_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fneg z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 4 x half> %b
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @fneg_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fneg_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: fneg z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 8 x half> %b
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @fneg_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fneg_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fneg z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 2 x float> %b
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @fneg_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fneg_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fneg z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 4 x float> %b
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @fneg_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fneg_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fneg z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 2 x double> %b
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 2 x bfloat> @fneg_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: fneg_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor z1.h, z1.h, #0x8000
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 2 x bfloat> %b
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
+ ret <vscale x 2 x bfloat> %res
+}
+
+define <vscale x 4 x bfloat> @fneg_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: fneg_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor z1.h, z1.h, #0x8000
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 4 x bfloat> %b
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
+ ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 8 x bfloat> @fneg_nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; CHECK-LABEL: fneg_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: eor z1.h, z1.h, #0x8000
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = fneg <vscale x 8 x bfloat> %b
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %b.op, <vscale x 8 x bfloat> %a
+ ret <vscale x 8 x bfloat> %res
+}
+
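+; llvm.round lowers to a merging FRINTA.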
+define <vscale x 2 x half> @frinta_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frinta_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinta z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frinta_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frinta_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frinta z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frinta_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frinta_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frinta z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frinta_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frinta_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinta z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frinta_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frinta_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frinta z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frinta_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frinta_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinta z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
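+; llvm.nearbyint lowers to a merging FRINTI.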
+define <vscale x 2 x half> @frinti_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frinti_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinti z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frinti_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frinti_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frinti z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frinti_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frinti_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frinti z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frinti_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frinti_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinti z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.nearbyint.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frinti_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frinti_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frinti z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frinti_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frinti_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frinti z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
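+; llvm.floor lowers to a merging FRINTM.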
+define <vscale x 2 x half> @frintm_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frintm_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintm z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frintm_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frintm_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintm z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frintm_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frintm_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frintm z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frintm_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frintm_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintm z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frintm_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frintm_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintm z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frintm_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frintm_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintm z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
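+; llvm.roundeven lowers to a merging FRINTN.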
+define <vscale x 2 x half> @frintn_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frintn_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintn z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frintn_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frintn_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintn z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frintn_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frintn_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frintn z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frintn_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frintn_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintn z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frintn_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frintn_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintn z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frintn_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frintn_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintn z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
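+; llvm.ceil lowers to a merging FRINTP.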
+define <vscale x 2 x half> @frintp_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frintp_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintp z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frintp_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frintp_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintp z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frintp_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frintp_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frintp z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frintp_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frintp_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintp z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.ceil.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frintp_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frintp_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintp z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frintp_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frintp_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintp z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
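+; llvm.rint lowers to a merging FRINTX.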
+define <vscale x 2 x half> @frintx_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frintx_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintx z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frintx_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frintx_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintx z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frintx_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frintx_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frintx z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frintx_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frintx_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintx z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.rint.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frintx_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frintx_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintx z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frintx_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frintx_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintx z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
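+; llvm.trunc lowers to a merging FRINTZ.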
+define <vscale x 2 x half> @frintz_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: frintz_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintz z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @frintz_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: frintz_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintz z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @frintz_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: frintz_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: frintz z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @frintz_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: frintz_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintz z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @frintz_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: frintz_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: frintz z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @frintz_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: frintz_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: frintz z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
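+; llvm.sqrt lowers to a merging FSQRT.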
+define <vscale x 2 x half> @fsqrt_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; CHECK-LABEL: fsqrt_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fsqrt z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 4 x half> @fsqrt_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; CHECK-LABEL: fsqrt_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fsqrt z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @fsqrt_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: fsqrt_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: fsqrt z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 2 x float> @fsqrt_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: fsqrt_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fsqrt z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @fsqrt_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: fsqrt_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: fsqrt z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @fsqrt_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: fsqrt_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: fsqrt z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
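+; Integer negation (sub 0, %b) currently selects the unpredicated SUBR
+; immediate form rather than a predicated NEG.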
+define <vscale x 16 x i8> @neg_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: neg_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subr z1.b, z1.b, #0 // =0x0
+; CHECK-NEXT: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+ %b.op = sub <vscale x 16 x i8> zeroinitializer, %b
+ %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @neg_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: neg_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subr z1.h, z1.h, #0 // =0x0
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = sub <vscale x 8 x i16> zeroinitializer, %b
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @neg_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: neg_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subr z1.s, z1.s, #0 // =0x0
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = sub <vscale x 4 x i32> zeroinitializer, %b
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @neg_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: neg_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subr z1.d, z1.d, #0 // =0x0
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sub <vscale x 2 x i64> zeroinitializer, %b
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
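+; llvm.bitreverse lowers to a merging RBIT.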
+define <vscale x 16 x i8> @rbit_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: rbit_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.b
+; CHECK-NEXT: rbit z1.b, p1/m, z1.b
+; CHECK-NEXT: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %b)
+ %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @rbit_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: rbit_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: rbit z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @rbit_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: rbit_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: rbit z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @rbit_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: rbit_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: rbit z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
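+; llvm.bswap lowers to a merging REVB (i8 elements are omitted because
+; byte-swapping a single byte is a no-op).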
+define <vscale x 8 x i16> @revb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: revb_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: revb z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %b)
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @revb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: revb_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: revb z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %b)
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @revb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: revb_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: revb z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %b)
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x half> @scvtf_nxv8i16_to_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: scvtf_nxv8i16_to_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: scvtf z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 8 x i16> %b to <vscale x 8 x half>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x half> @scvtf_nxv4i32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: scvtf_nxv4i32_to_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: scvtf z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 4 x i32> %b to <vscale x 4 x half>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 4 x float> @scvtf_nxv4i32_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: scvtf_nxv4i32_to_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: scvtf z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 4 x i32> %b to <vscale x 4 x float>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x half> @scvtf_nxv2i64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: scvtf z1.h, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x half>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 2 x float> @scvtf_nxv2i64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: scvtf z1.s, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x float>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x double> @scvtf_nxv2i64_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: scvtf z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x double>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 8 x i16> @sxtb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sxtb_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: sxtb z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sxtb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: sxtb_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: sxtb z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sxtb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i8> %b) {
+; CHECK-LABEL: sxtb_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: sxtb z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 2 x i8> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @sxth_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: sxth_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: sxth z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sxth_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i16> %b) {
+; CHECK-LABEL: sxth_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: sxth z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 2 x i16> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @sxtw_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: sxtw_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: sxtw z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x half> @ucvtf_nxv8i16_to_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: ucvtf_nxv8i16_to_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ucvtf z1.h, p1/m, z1.h
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 8 x i16> %b to <vscale x 8 x half>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x half> @ucvtf_nxv4i32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: ucvtf_nxv4i32_to_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ucvtf z1.h, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 4 x i32> %b to <vscale x 4 x half>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 4 x float> @ucvtf_nxv4i32_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: ucvtf_nxv4i32_to_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ucvtf z1.s, p1/m, z1.s
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 4 x i32> %b to <vscale x 4 x float>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x half> @ucvtf_nxv2i64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ucvtf z1.h, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x half>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
+ ret <vscale x 2 x half> %res
+}
+
+define <vscale x 2 x float> @ucvtf_nxv2i64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ucvtf z1.s, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x float>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
+ ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x double> @ucvtf_nxv2i64_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ucvtf z1.d, p1/m, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x double>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
+ ret <vscale x 2 x double> %res
+}
+
+define <vscale x 8 x i16> @uxtb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: uxtb_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0xff
+; CHECK-NEXT: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @uxtb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uxtb_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.s, z1.s, #0xff
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 4 x i8> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uxtb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i8> %b) {
+; CHECK-LABEL: uxtb_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.d, z1.d, #0xff
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 2 x i8> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i32> @uxth_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uxth_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.s, z1.s, #0xffff
+; CHECK-NEXT: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uxth_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i16> %b) {
+; CHECK-LABEL: uxth_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.d, z1.d, #0xffff
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 2 x i16> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @uxtw_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: uxtw_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %b.op = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
+ ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+
+declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.ceil.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)
+declare <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat>)
+declare <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat>)
+declare <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat>)
+
+declare <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.nearbyint.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.rint.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double>)
+
+declare <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half>)
+declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half>)
+declare <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float>)
+declare <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double>)