[llvm] [AArch64] Guard against non-vector abd long nodes. (PR #102026)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 5 10:44:36 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: David Green (davemgreen)
Changes:
This fixes a problem that appears if abd nodes are generated more readily (#92576). The fold of abd nodes into abdl needs to check that the input is the correct form of vector: a 64-bit vector, so that extending it gives a legal 128-bit abdl result. The added test requires vector legalization to occur in order to hit the combine at the wrong time, when the zext operand is a scalar abd node.
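For context, the fold this guard protects turns a widening extend of a 64-bit-vector absolute difference into a single widening instruction. A minimal hand-written IR sketch (not taken from the patch) of the profitable shape, using the NEON uabd intrinsic:

```llvm
; A 64-bit vector abd whose zero-extend widens to a 128-bit result:
; (zext (abdu x, y)) can select to a single uabdl instruction here.
declare <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8>, <8 x i8>)

define <8 x i16> @zext_uabd(<8 x i8> %a, <8 x i8> %b) {
  %abd = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
  %ext = zext <8 x i8> %abd to <8 x i16>
  ret <8 x i16> %ext
}
```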
---
Full diff: https://github.com/llvm/llvm-project/pull/102026.diff
3 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+1)
- (modified) llvm/test/CodeGen/AArch64/abds.ll (+22)
- (modified) llvm/test/CodeGen/AArch64/abdu.ll (+26)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7704321a0fc3a..f0c3afc4f9b5d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21769,6 +21769,7 @@ static SDValue performExtendCombine(SDNode *N,
// helps the backend to decide that an sabdl2 would be useful, saving a real
// extract_high operation.
if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
+ N->getOperand(0).getValueType().is64BitVector() &&
(N->getOperand(0).getOpcode() == ISD::ABDU ||
N->getOperand(0).getOpcode() == ISD::ABDS)) {
SDNode *ABDNode = N->getOperand(0).getNode();
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index d4ad33f963ba7..215907c66a6e8 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -571,6 +571,28 @@ define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
ret i32 %abs
}
+define i64 @vector_legalized(i16 %a, i16 %b) {
+; CHECK-LABEL: vector_legalized:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: sxth w8, w0
+; CHECK-NEXT: sub w8, w8, w1, sxth
+; CHECK-NEXT: addp d0, v0.2d
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: cneg w8, w8, mi
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: add x0, x9, x8
+; CHECK-NEXT: ret
+ %ea = sext i16 %a to i32
+ %eb = sext i16 %b to i32
+ %s = sub i32 %ea, %eb
+ %ab = call i32 @llvm.abs.i32(i32 %s, i1 false)
+ %e = zext i32 %ab to i64
+ %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> zeroinitializer)
+ %z = add i64 %red, %e
+ ret i64 %z
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 983db629e4498..f70f095d7daba 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -409,6 +409,32 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
ret i128 %sel
}
+;
+; negative tests
+;
+
+define i64 @vector_legalized(i16 %a, i16 %b) {
+; CHECK-LABEL: vector_legalized:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: and w8, w0, #0xffff
+; CHECK-NEXT: sub w8, w8, w1, uxth
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: addp d0, v0.2d
+; CHECK-NEXT: cneg w8, w8, mi
+; CHECK-NEXT: fmov x9, d0
+; CHECK-NEXT: add x0, x9, x8
+; CHECK-NEXT: ret
+ %ea = zext i16 %a to i32
+ %eb = zext i16 %b to i32
+ %s = sub i32 %ea, %eb
+ %ab = call i32 @llvm.abs.i32(i32 %s, i1 false)
+ %e = zext i32 %ab to i64
+ %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> zeroinitializer)
+ %z = add i64 %red, %e
+ ret i64 %z
+}
+
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
``````````
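To see why the guard is needed, here is the added test distilled with comments (a hand-written annotation, not part of the patch); the zero reduction exists only to force an extra round of vector legalization, so the combine revisits the scalar zext after legalize-ops:

```llvm
declare i32 @llvm.abs.i32(i32, i1)
declare i64 @llvm.vector.reduce.add.v32i64(<32 x i64>)

define i64 @vector_legalized(i16 %a, i16 %b) {
  %ea = zext i16 %a to i32
  %eb = zext i16 %b to i32
  %s = sub i32 %ea, %eb
  ; abs(sub(zext, zext)) is recognised as an i32 abdu node (#92576).
  %ab = call i32 @llvm.abs.i32(i32 %s, i1 false)
  ; A zext of a *scalar* abd: the new is64BitVector() guard must reject it.
  %e = zext i32 %ab to i64
  ; Forces vector legalization, so the combine runs at the wrong time.
  %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> zeroinitializer)
  %z = add i64 %red, %e
  ret i64 %z
}
```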
https://github.com/llvm/llvm-project/pull/102026