[llvm] c21fad9 - Revert "[AArch64][SVE] Add more intrinsics in 'isZeroingInactiveLanes'."
Author: chendewen
Date: 2023-01-17T15:30:46+08:00
New Revision: c21fad90b3fc29114dde5cb695bf5865c3a42d77
URL: https://github.com/llvm/llvm-project/commit/c21fad90b3fc29114dde5cb695bf5865c3a42d77
DIFF: https://github.com/llvm/llvm-project/commit/c21fad90b3fc29114dde5cb695bf5865c3a42d77.diff
LOG: Revert "[AArch64][SVE] Add more intrinsics in 'isZeroingInactiveLanes'."
This reverts commit 6ef6b2b5162ef48a63fb2697d77cffa6d7b1f7e7.
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Removed:
llvm/test/CodeGen/AArch64/sve2-intrinsics-reinterpret.ll
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c6bf71cc9d7a..219e2ad1d540 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -287,29 +287,6 @@ static bool isZeroingInactiveLanes(SDValue Op) {
case Intrinsic::aarch64_sve_fcmpge:
case Intrinsic::aarch64_sve_fcmpgt:
case Intrinsic::aarch64_sve_fcmpuo:
- case Intrinsic::aarch64_sve_facgt:
- case Intrinsic::aarch64_sve_facge:
- case Intrinsic::aarch64_sve_whilege:
- case Intrinsic::aarch64_sve_whilegt:
- case Intrinsic::aarch64_sve_whilehi:
- case Intrinsic::aarch64_sve_whilehs:
- case Intrinsic::aarch64_sve_whilele:
- case Intrinsic::aarch64_sve_whilelo:
- case Intrinsic::aarch64_sve_whilels:
- case Intrinsic::aarch64_sve_whilelt:
- case Intrinsic::aarch64_sve_match:
- case Intrinsic::aarch64_sve_nmatch:
- case Intrinsic::aarch64_sve_trn1:
- case Intrinsic::aarch64_sve_trn2:
- case Intrinsic::aarch64_sve_uzp1:
- case Intrinsic::aarch64_sve_uzp2:
- case Intrinsic::aarch64_sve_orr_z:
- case Intrinsic::aarch64_sve_orn_z:
- case Intrinsic::aarch64_sve_and_z:
- case Intrinsic::aarch64_sve_bic_z:
- case Intrinsic::aarch64_sve_eor_z:
- case Intrinsic::aarch64_sve_nor_z:
- case Intrinsic::aarch64_sve_nand_z:
return true;
}
}
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-reinterpret.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-reinterpret.ll
deleted file mode 100644
index b0a98746e017..000000000000
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-reinterpret.ll
+++ /dev/null
@@ -1,538 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
-
-define <vscale x 16 x i1> @facgt_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
-; CHECK-LABEL: facgt_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: facgt p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @facge_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
-; CHECK-LABEL: facge_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: facge p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilege_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilege_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilege p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilegt_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilegt_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilegt p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilehi_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilehi_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilehi p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilehs_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilehs_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilehs p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilele_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilele_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilele p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilelo_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilelo_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilelo p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilels_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilels_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilels p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @whilelt_fun(i32 %a, i32 %b) {
-; CHECK-LABEL: whilelt_fun:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: whilelt p0.d, w0, w1
-; CHECK-NEXT: ret
-entry:
- %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
- %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
- ret <vscale x 16 x i1> %1
-}
-
-define <vscale x 16 x i1> @cmpeq_d_fun(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpeq_d_fun:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpeq_wide_s_fun(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpeq_wide_s_fun:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpge_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpge p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpge_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpge_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpge p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpgt_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpgt p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpgt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpgt_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpgt p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmphi_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmphi_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmphi_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmphi p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmphs_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmphs p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmphs_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmphs_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmphs p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmple_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmple_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmple p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmplo_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmplo_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmplo p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpls_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpls_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpls p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmplt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmplt_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmplt p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpne_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg,
- <vscale x 2 x i64> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @cmpne_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: cmpne_wide_s:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg,
- <vscale x 4 x i32> %a,
- <vscale x 2 x i64> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @fcmeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fcmeq_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcmeq p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %pg,
- <vscale x 2 x double> %a,
- <vscale x 2 x double> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @fcmgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fcmgt_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcmgt p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %pg,
- <vscale x 2 x double> %a,
- <vscale x 2 x double> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @fcmne_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fcmne_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcmne p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %pg,
- <vscale x 2 x double> %a,
- <vscale x 2 x double> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @fcmuo_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: fcmuo_d:
-; CHECK: // %bb.0:
-; CHECK-NEXT: fcmuo p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %pg,
- <vscale x 2 x double> %a,
- <vscale x 2 x double> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @match_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: match_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: match p0.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: ret
- %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1> %pg,
- <vscale x 8 x i16> %a,
- <vscale x 8 x i16> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @nmatch_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: nmatch_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: nmatch p0.h, p0/z, z0.h, z1.h
-; CHECK-NEXT: ret
- %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1> %pg,
- <vscale x 8 x i16> %a,
- <vscale x 8 x i16> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @trn1_b64(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
-; CHECK-LABEL: trn1_b64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: trn1 p0.d, p0.d, p1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.trn1.nxv2i1(<vscale x 2 x i1> %a,
- <vscale x 2 x i1> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @trn2_b64(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
-; CHECK-LABEL: trn2_b64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: trn2 p0.d, p0.d, p1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.trn2.nxv2i1(<vscale x 2 x i1> %a,
- <vscale x 2 x i1> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @uzp1_b64(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
-; CHECK-LABEL: uzp1_b64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uzp1 p0.d, p0.d, p1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.uzp1.nxv2i1(<vscale x 2 x i1> %a,
- <vscale x 2 x i1> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @uzp2_b64(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
-; CHECK-LABEL: uzp2_b64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: uzp2 p0.d, p0.d, p1.d
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.uzp2.nxv2i1(<vscale x 2 x i1> %a,
- <vscale x 2 x i1> %b)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @orr_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: orr_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @orn_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: orn_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orn p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out
-}
-
-define <vscale x 16 x i1> @and_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: and_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out;
-}
-
-define <vscale x 16 x i1> @bic_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: bic_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out;
-}
-
-define <vscale x 16 x i1> @eor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: eor_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: eor p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out;
-}
-
-define <vscale x 16 x i1> @nor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: nor_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: nor p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out;
-}
-
-define <vscale x 16 x i1> @nand_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
-; CHECK-LABEL: nand_2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: nand p0.b, p0/z, p1.b, p2.b
-; CHECK-NEXT: ret
- %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
- %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
- ret <vscale x 16 x i1> %out;
-}
-
-declare <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.trn2.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.trn1.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.uzp1.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.uzp2.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
-declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
-declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
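
For context, a minimal sketch (not part of the commit itself, adapted from the facgt_fun test deleted above): the reverted change had listed these intrinsics in isZeroingInactiveLanes, signalling that they already produce zero in their inactive predicate lanes, so lowering llvm.aarch64.sve.convert.to.svbool of their results could skip re-zeroing the wider predicate. The deleted test exercised patterns of the following shape, with the CHECK lines expecting the compare instruction alone and no additional ptrue/and masking:

; Illustrative sketch only: facgt followed by a widening reinterpret to svbool.
declare <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)

define <vscale x 16 x i1> @facgt_to_svbool(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
  ; facgt yields an nxv2i1 predicate; inactive lanes of the result are zero.
  %cmp = call <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ; Reinterpreting to the full nxv16i1 svbool type; with the reverted change this
  ; needed no extra masking because the producer was known to zero inactive lanes.
  %wide = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %cmp)
  ret <vscale x 16 x i1> %wide
}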