[llvm] [AArch64][SVE] Fix wide adds being generated without SVE2 (PR #118838)
James Chesterman via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 5 13:28:31 PST 2024
https://github.com/JamesChesterman updated https://github.com/llvm/llvm-project/pull/118838
>From 1a68328b4a9fcd7dd8f3be01312cf30c827ef0bd Mon Sep 17 00:00:00 2001
From: James Chesterman <james.chesterman at arm.com>
Date: Thu, 5 Dec 2024 17:29:38 +0000
Subject: [PATCH 1/3] [AArch64][SVE] Fix wide adds being generated without SVE2
Wide add instructions would be generated whenever base SVE was available, even though they are only legal with SVE2 (or in streaming mode); a standalone reproducer is sketched below.
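A minimal standalone reproducer, in the spirit of the tests updated in the
follow-up patches (the function name is mine; the body is lifted from the
test file). Before this change, compiling it with only base SVE enabled
would take the wide-add path and try to emit the SVE2-only saddwb/saddwt
pair:
  ; llc -mtriple=aarch64 -mattr=+sve repro.ll -o -
  define <vscale x 2 x i64> @repro(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input) {
  entry:
    %input.wide = sext <vscale x 4 x i32> %input to <vscale x 4 x i64>
    %partial.reduce = tail call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide)
    ret <vscale x 2 x i64> %partial.reduce
  }
With the fix, the same input falls back to the generic lowering
(sunpklo/sunpkhi feeding two adds), as shown by the CHECK-SVE lines added
in the third patch.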
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7ab3fc06715ec8..75f0bae84db67e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21814,7 +21814,7 @@ SDValue tryLowerPartialReductionToWideAdd(SDNode *N,
Intrinsic::experimental_vector_partial_reduce_add &&
"Expected a partial reduction node");
- if (!Subtarget->isSVEorStreamingSVEAvailable())
+ if (!Subtarget->hasSVE2() && !Subtarget->isStreamingSVEAvailable())
return SDValue();
SDLoc DL(N);
>From 8e55b1ab8bc9b61c1526f3e218799a62b750031a Mon Sep 17 00:00:00 2001
From: James Chesterman <james.chesterman at arm.com>
Date: Thu, 5 Dec 2024 20:02:49 +0000
Subject: [PATCH 2/3] Add an extra RUN line to the test file to protect the fix
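Note that at this point both RUN lines share the bare CHECK prefix, so
FileCheck requires the +sve and +sve2 outputs to be identical; since base
SVE now lowers these reductions differently, the prefixes are split and the
assertions regenerated in the next patch:
  ; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE2
  ; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE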
---
llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
index 1d05649964670d..adf6a1760526dc 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s
define <vscale x 2 x i64> @signed_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input){
; CHECK-LABEL: signed_wide_add_nxv4i32:
>From 97be492c5288636e0a2f0f02a1adf36ddc61e320 Mon Sep 17 00:00:00 2001
From: James Chesterman <james.chesterman at arm.com>
Date: Thu, 5 Dec 2024 21:27:48 +0000
Subject: [PATCH 3/3] Update the CHECK lines for the new RUN line in the test file
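As the NOTE line in the test says, the assertions are autogenerated. A
plausible way to regenerate them after adding the second RUN line (my
invocation, assuming a built llc is reachable; the patch does not state the
exact command):
  python3 llvm/utils/update_llc_test_checks.py \
      llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll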
---
.../AArch64/sve-partial-reduce-wide-add.ll | 112 +++++++++++++-----
1 file changed, 80 insertions(+), 32 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
index adf6a1760526dc..b4b946c68566ed 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-wide-add.ll
@@ -1,13 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE2
+; RUN: llc -mtriple=aarch64 -mattr=+sve %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE
define <vscale x 2 x i64> @signed_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input){
-; CHECK-LABEL: signed_wide_add_nxv4i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.d, z0.d, z1.s
-; CHECK-NEXT: saddwt z0.d, z0.d, z1.s
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv4i32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: saddwt z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv4i32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.d, z1.s
+; CHECK-SVE-NEXT: sunpkhi z1.d, z1.s
+; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d
+; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = sext <vscale x 4 x i32> %input to <vscale x 4 x i64>
%partial.reduce = tail call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide)
@@ -15,11 +23,19 @@ entry:
}
define <vscale x 2 x i64> @unsigned_wide_add_nxv4i32(<vscale x 2 x i64> %acc, <vscale x 4 x i32> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv4i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.d, z0.d, z1.s
-; CHECK-NEXT: uaddwt z0.d, z0.d, z1.s
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv4i32:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: uaddwt z0.d, z0.d, z1.s
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv4i32:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.d, z1.s
+; CHECK-SVE-NEXT: uunpkhi z1.d, z1.s
+; CHECK-SVE-NEXT: add z0.d, z0.d, z2.d
+; CHECK-SVE-NEXT: add z0.d, z1.d, z0.d
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = zext <vscale x 4 x i32> %input to <vscale x 4 x i64>
%partial.reduce = tail call <vscale x 2 x i64> @llvm.experimental.vector.partial.reduce.add.nxv2i64.nxv4i64(<vscale x 2 x i64> %acc, <vscale x 4 x i64> %input.wide)
@@ -27,11 +43,19 @@ entry:
}
define <vscale x 4 x i32> @signed_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input){
-; CHECK-LABEL: signed_wide_add_nxv8i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.s, z0.s, z1.h
-; CHECK-NEXT: saddwt z0.s, z0.s, z1.h
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv8i16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: saddwt z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv8i16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.s, z1.h
+; CHECK-SVE-NEXT: sunpkhi z1.s, z1.h
+; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s
+; CHECK-SVE-NEXT: add z0.s, z1.s, z0.s
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = sext <vscale x 8 x i16> %input to <vscale x 8 x i32>
%partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide)
@@ -39,11 +63,19 @@ entry:
}
define <vscale x 4 x i32> @unsigned_wide_add_nxv8i16(<vscale x 4 x i32> %acc, <vscale x 8 x i16> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv8i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.s, z0.s, z1.h
-; CHECK-NEXT: uaddwt z0.s, z0.s, z1.h
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv8i16:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: uaddwt z0.s, z0.s, z1.h
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv8i16:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.s, z1.h
+; CHECK-SVE-NEXT: uunpkhi z1.s, z1.h
+; CHECK-SVE-NEXT: add z0.s, z0.s, z2.s
+; CHECK-SVE-NEXT: add z0.s, z1.s, z0.s
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = zext <vscale x 8 x i16> %input to <vscale x 8 x i32>
%partial.reduce = tail call <vscale x 4 x i32> @llvm.experimental.vector.partial.reduce.add.nxv4i32.nxv8i32(<vscale x 4 x i32> %acc, <vscale x 8 x i32> %input.wide)
@@ -51,11 +83,19 @@ entry:
}
define <vscale x 8 x i16> @signed_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input){
-; CHECK-LABEL: signed_wide_add_nxv16i8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: saddwb z0.h, z0.h, z1.b
-; CHECK-NEXT: saddwt z0.h, z0.h, z1.b
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: signed_wide_add_nxv16i8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: saddwb z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: saddwt z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: signed_wide_add_nxv16i8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: sunpklo z2.h, z1.b
+; CHECK-SVE-NEXT: sunpkhi z1.h, z1.b
+; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h
+; CHECK-SVE-NEXT: add z0.h, z1.h, z0.h
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = sext <vscale x 16 x i8> %input to <vscale x 16 x i16>
%partial.reduce = tail call <vscale x 8 x i16> @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide)
@@ -63,11 +103,19 @@ entry:
}
define <vscale x 8 x i16> @unsigned_wide_add_nxv16i8(<vscale x 8 x i16> %acc, <vscale x 16 x i8> %input){
-; CHECK-LABEL: unsigned_wide_add_nxv16i8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: uaddwb z0.h, z0.h, z1.b
-; CHECK-NEXT: uaddwt z0.h, z0.h, z1.b
-; CHECK-NEXT: ret
+; CHECK-SVE2-LABEL: unsigned_wide_add_nxv16i8:
+; CHECK-SVE2: // %bb.0: // %entry
+; CHECK-SVE2-NEXT: uaddwb z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: uaddwt z0.h, z0.h, z1.b
+; CHECK-SVE2-NEXT: ret
+;
+; CHECK-SVE-LABEL: unsigned_wide_add_nxv16i8:
+; CHECK-SVE: // %bb.0: // %entry
+; CHECK-SVE-NEXT: uunpklo z2.h, z1.b
+; CHECK-SVE-NEXT: uunpkhi z1.h, z1.b
+; CHECK-SVE-NEXT: add z0.h, z0.h, z2.h
+; CHECK-SVE-NEXT: add z0.h, z1.h, z0.h
+; CHECK-SVE-NEXT: ret
entry:
%input.wide = zext <vscale x 16 x i8> %input to <vscale x 16 x i16>
%partial.reduce = tail call <vscale x 8 x i16> @llvm.experimental.vector.partial.reduce.add.nxv8i16.nxv16i16(<vscale x 8 x i16> %acc, <vscale x 16 x i16> %input.wide)