[llvm] b8c8bb0 - [DAG] Fold neg(splat(neg(x))) -> splat(x)
David Green via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 25 11:53:41 PDT 2021
Author: David Green
Date: 2021-06-25T19:53:29+01:00
New Revision: b8c8bb07692cfb9a78049fd2fb5c46a91ee2e90f
URL: https://github.com/llvm/llvm-project/commit/b8c8bb07692cfb9a78049fd2fb5c46a91ee2e90f
DIFF: https://github.com/llvm/llvm-project/commit/b8c8bb07692cfb9a78049fd2fb5c46a91ee2e90f.diff
LOG: [DAG] Fold neg(splat(neg(x))) -> splat(x)
This adds a fold of sub(0, splat(sub(0, x))) -> splat(x). This pattern can
come up in the lowering of right shifts under AArch64, where we generate
a left shift by a negated shift amount.
Differential Revision: https://reviews.llvm.org/D103755
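As an illustration, here is a minimal IR sketch of the pattern the combine
matches (in the spirit of the subsubii8 test updated below; the function name
is made up). After the fold, the two negations cancel and only the splat of %b
survives:

  define <16 x i8> @splat_of_double_neg(i8 %b) {
  entry:
    %neg = sub i8 0, %b                                     ; neg(x)
    %ins = insertelement <16 x i8> undef, i8 %neg, i32 0
    %splat = shufflevector <16 x i8> %ins, <16 x i8> undef, <16 x i32> zeroinitializer ; splat(neg(x))
    %res = sub <16 x i8> zeroinitializer, %splat            ; neg(splat(neg(x))) -> splat(x)
    ret <16 x i8> %res
  }

On AArch64 this now selects to a single "dup v0.16b, w0" instead of a
neg/dup/neg sequence, as the test diffs below show.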
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/test/CodeGen/AArch64/neon-shift-neg.ll
llvm/test/CodeGen/AArch64/sub-splat-sub.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 02f3ea22fa42f..63c979c4dbd73 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3308,6 +3308,17 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
        !TLI.isOperationLegalOrCustom(ISD::ABS, VT) &&
        TLI.expandABS(N1.getNode(), Result, DAG, true))
      return Result;
+
+    // Fold neg(splat(neg(x))) -> splat(x)
+    if (VT.isVector()) {
+      SDValue N1S = DAG.getSplatValue(N1, true);
+      if (N1S && N1S.getOpcode() == ISD::SUB &&
+          isNullConstant(N1S.getOperand(0))) {
+        if (VT.isScalableVector())
+          return DAG.getSplatVector(VT, DL, N1S.getOperand(1));
+        return DAG.getSplatBuildVector(VT, DL, N1S.getOperand(1));
+      }
+    }
  }

  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
diff --git a/llvm/test/CodeGen/AArch64/neon-shift-neg.ll b/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
index ecfa4308ffc86..9ea0e96e2f77c 100644
--- a/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
+++ b/llvm/test/CodeGen/AArch64/neon-shift-neg.ll
@@ -4,9 +4,7 @@
define <2 x i64> @shr64x2(<2 x i64> %a, i64 %b) {
; CHECK-LABEL: shr64x2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg x8, x0
-; CHECK-NEXT: dup v1.2d, x8
-; CHECK-NEXT: neg v1.2d, v1.2d
+; CHECK-NEXT: dup v1.2d, x0
; CHECK-NEXT: sshl v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
entry:
@@ -20,9 +18,7 @@ entry:
define <4 x i32> @shr32x4(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: shr32x4:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: neg v1.4s, v1.4s
+; CHECK-NEXT: dup v1.4s, w0
; CHECK-NEXT: sshl v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
@@ -36,9 +32,7 @@ entry:
define <4 x i32> @shr32x4undef(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: shr32x4undef:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: neg v1.4s, v1.4s
+; CHECK-NEXT: dup v1.4s, w0
; CHECK-NEXT: sshl v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
@@ -52,9 +46,7 @@ entry:
define <8 x i16> @shr16x8(<8 x i16> %a, i16 %b) {
; CHECK-LABEL: shr16x8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8h, w8
-; CHECK-NEXT: neg v1.8h, v1.8h
+; CHECK-NEXT: dup v1.8h, w0
; CHECK-NEXT: sshl v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
entry:
@@ -68,9 +60,7 @@ entry:
define <16 x i8> @shr8x16(<16 x i8> %a, i8 %b) {
; CHECK-LABEL: shr8x16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.16b, w8
-; CHECK-NEXT: neg v1.16b, v1.16b
+; CHECK-NEXT: dup v1.16b, w0
; CHECK-NEXT: sshl v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
entry:
@@ -84,9 +74,7 @@ entry:
define <1 x i64> @shr64x1(<1 x i64> %a, i64 %b) {
; CHECK-LABEL: shr64x1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg x8, x0
-; CHECK-NEXT: fmov d1, x8
-; CHECK-NEXT: neg d1, d1
+; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: sshl d0, d0, d1
; CHECK-NEXT: ret
entry:
@@ -99,9 +87,7 @@ entry:
define <2 x i32> @shr32x2(<2 x i32> %a, i32 %b) {
; CHECK-LABEL: shr32x2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.2s, w8
-; CHECK-NEXT: neg v1.2s, v1.2s
+; CHECK-NEXT: dup v1.2s, w0
; CHECK-NEXT: sshl v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
@@ -115,9 +101,7 @@ entry:
define <4 x i16> @shr16x4(<4 x i16> %a, i16 %b) {
; CHECK-LABEL: shr16x4:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4h, w8
-; CHECK-NEXT: neg v1.4h, v1.4h
+; CHECK-NEXT: dup v1.4h, w0
; CHECK-NEXT: sshl v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
@@ -131,9 +115,7 @@ entry:
define <8 x i8> @shr8x8(<8 x i8> %a, i8 %b) {
; CHECK-LABEL: shr8x8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8b, w8
-; CHECK-NEXT: neg v1.8b, v1.8b
+; CHECK-NEXT: dup v1.8b, w0
; CHECK-NEXT: sshl v0.8b, v0.8b, v1.8b
; CHECK-NEXT: ret
entry:
@@ -147,9 +129,7 @@ entry:
define <2 x i64> @lshr64x2(<2 x i64> %a, i64 %b) {
; CHECK-LABEL: lshr64x2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg x8, x0
-; CHECK-NEXT: dup v1.2d, x8
-; CHECK-NEXT: neg v1.2d, v1.2d
+; CHECK-NEXT: dup v1.2d, x0
; CHECK-NEXT: ushl v0.2d, v0.2d, v1.2d
; CHECK-NEXT: ret
entry:
@@ -163,9 +143,7 @@ entry:
define <4 x i32> @lshr32x4(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: lshr32x4:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: neg v1.4s, v1.4s
+; CHECK-NEXT: dup v1.4s, w0
; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
@@ -179,9 +157,7 @@ entry:
define <4 x i32> @lshr32x4undef(<4 x i32> %a, i32 %b) {
; CHECK-LABEL: lshr32x4undef:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4s, w8
-; CHECK-NEXT: neg v1.4s, v1.4s
+; CHECK-NEXT: dup v1.4s, w0
; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
entry:
@@ -195,9 +171,7 @@ entry:
define <8 x i16> @lshr16x8(<8 x i16> %a, i16 %b) {
; CHECK-LABEL: lshr16x8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8h, w8
-; CHECK-NEXT: neg v1.8h, v1.8h
+; CHECK-NEXT: dup v1.8h, w0
; CHECK-NEXT: ushl v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
entry:
@@ -211,9 +185,7 @@ entry:
define <16 x i8> @lshr8x16(<16 x i8> %a, i8 %b) {
; CHECK-LABEL: lshr8x16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.16b, w8
-; CHECK-NEXT: neg v1.16b, v1.16b
+; CHECK-NEXT: dup v1.16b, w0
; CHECK-NEXT: ushl v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
entry:
@@ -227,9 +199,7 @@ entry:
define <1 x i64> @lshr64x1(<1 x i64> %a, i64 %b) {
; CHECK-LABEL: lshr64x1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg x8, x0
-; CHECK-NEXT: fmov d1, x8
-; CHECK-NEXT: neg d1, d1
+; CHECK-NEXT: fmov d1, x0
; CHECK-NEXT: ushl d0, d0, d1
; CHECK-NEXT: ret
entry:
@@ -242,9 +212,7 @@ entry:
define <2 x i32> @lshr32x2(<2 x i32> %a, i32 %b) {
; CHECK-LABEL: lshr32x2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.2s, w8
-; CHECK-NEXT: neg v1.2s, v1.2s
+; CHECK-NEXT: dup v1.2s, w0
; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
entry:
@@ -258,9 +226,7 @@ entry:
define <4 x i16> @lshr16x4(<4 x i16> %a, i16 %b) {
; CHECK-LABEL: lshr16x4:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.4h, w8
-; CHECK-NEXT: neg v1.4h, v1.4h
+; CHECK-NEXT: dup v1.4h, w0
; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
; CHECK-NEXT: ret
entry:
@@ -274,9 +240,7 @@ entry:
define <8 x i8> @lshr8x8(<8 x i8> %a, i8 %b) {
; CHECK-LABEL: lshr8x8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v1.8b, w8
-; CHECK-NEXT: neg v1.8b, v1.8b
+; CHECK-NEXT: dup v1.8b, w0
; CHECK-NEXT: ushl v0.8b, v0.8b, v1.8b
; CHECK-NEXT: ret
entry:
@@ -603,9 +567,7 @@ entry:
define <vscale x 16 x i8> @subsub(<vscale x 16 x i8> %a, i8 %b) {
; CHECK-LABEL: subsub:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: mov z0.b, w8
-; CHECK-NEXT: subr z0.b, z0.b, #0 // =0x0
+; CHECK-NEXT: mov z0.b, w0
; CHECK-NEXT: ret
entry:
%sub = sub i8 0, %b
diff --git a/llvm/test/CodeGen/AArch64/sub-splat-sub.ll b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
index 8a5c6166a3f88..54022591bb771 100644
--- a/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
+++ b/llvm/test/CodeGen/AArch64/sub-splat-sub.ll
@@ -4,9 +4,7 @@
define <16 x i8> @subsubii8(<16 x i8> %a, i8 %b) {
; CHECK-LABEL: subsubii8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: dup v0.16b, w8
-; CHECK-NEXT: neg v0.16b, v0.16b
+; CHECK-NEXT: dup v0.16b, w0
; CHECK-NEXT: ret
entry:
%sub = sub i8 0, %b
@@ -19,9 +17,7 @@ entry:
define <vscale x 16 x i8> @subsubni8(<vscale x 16 x i8> %a, i8 %b) {
; CHECK-LABEL: subsubni8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: neg w8, w0
-; CHECK-NEXT: mov z0.b, w8
-; CHECK-NEXT: subr z0.b, z0.b, #0 // =0x0
+; CHECK-NEXT: mov z0.b, w0
; CHECK-NEXT: ret
entry:
%sub = sub i8 0, %b