[llvm] 851462f - [AArch64] Remove invalid uabdl patterns. (#89272)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 19 01:30:17 PDT 2024
Author: David Green
Date: 2024-04-19T09:30:13+01:00
New Revision: 851462fcaa7f6e3301865de84f98be7e872e64b6
URL: https://github.com/llvm/llvm-project/commit/851462fcaa7f6e3301865de84f98be7e872e64b6
DIFF: https://github.com/llvm/llvm-project/commit/851462fcaa7f6e3301865de84f98be7e872e64b6.diff
LOG: [AArch64] Remove invalid uabdl patterns. (#89272)
These were added in https://reviews.llvm.org/D14208, which look like
they attempt to detect abs from xor+add+ashr. They do not appear to be
detecting the correct value for the src input though, which I think is
intended to be the sub(zext, zext) part of the pattern. We have patterns
from abs now, so the old invalid patterns can be removed.
Fixes #88784
Added:
Modified:
llvm/lib/Target/AArch64/AArch64InstrInfo.td
llvm/test/CodeGen/AArch64/arm64-vabs.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 3bf90778363c6c..be53be782077b7 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4954,19 +4954,9 @@ defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (v8i8 V64:$opA)),
- (zext (v8i8 V64:$opB))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
(zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
- (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
(zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 5b45ba2552cefd..d64327656a9e01 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1848,3 +1848,51 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
%absel = select <2 x i1> %abcmp, <2 x i128> %ababs, <2 x i128> %abdiff
ret <2 x i128> %absel
}
+
+define <8 x i16> @pr88784(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
+; CHECK-SD-LABEL: pr88784:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: usubl.8h v0, v0, v1
+; CHECK-SD-NEXT: cmlt.8h v1, v2, #0
+; CHECK-SD-NEXT: ssra.8h v0, v2, #15
+; CHECK-SD-NEXT: eor.16b v0, v1, v0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: pr88784:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: usubl.8h v0, v0, v1
+; CHECK-GI-NEXT: sshr.8h v1, v2, #15
+; CHECK-GI-NEXT: ssra.8h v0, v2, #15
+; CHECK-GI-NEXT: eor.16b v0, v1, v0
+; CHECK-GI-NEXT: ret
+ %l4 = zext <8 x i8> %l0 to <8 x i16>
+ %l5 = ashr <8 x i16> %l2, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %l6 = zext <8 x i8> %l1 to <8 x i16>
+ %l7 = sub <8 x i16> %l4, %l6
+ %l8 = add <8 x i16> %l5, %l7
+ %l9 = xor <8 x i16> %l5, %l8
+ ret <8 x i16> %l9
+}
+
+define <8 x i16> @pr88784_fixed(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
+; CHECK-SD-LABEL: pr88784_fixed:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: uabdl.8h v0, v0, v1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: pr88784_fixed:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: usubl.8h v0, v0, v1
+; CHECK-GI-NEXT: sshr.8h v1, v0, #15
+; CHECK-GI-NEXT: ssra.8h v0, v0, #15
+; CHECK-GI-NEXT: eor.16b v0, v1, v0
+; CHECK-GI-NEXT: ret
+ %l4 = zext <8 x i8> %l0 to <8 x i16>
+ %l6 = zext <8 x i8> %l1 to <8 x i16>
+ %l7 = sub <8 x i16> %l4, %l6
+ %l5 = ashr <8 x i16> %l7, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %l8 = add <8 x i16> %l5, %l7
+ %l9 = xor <8 x i16> %l5, %l8
+ ret <8 x i16> %l9
+}
+
More information about the llvm-commits
mailing list