[llvm] 7abf7dd - [AArch64] Add combine for add(udot(0, x, y), z) -> udot(z, x, y).
David Green via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 1 04:54:10 PST 2021
Author: David Green
Date: 2021-03-01T12:53:34Z
New Revision: 7abf7dd5efe257b5e7ff72199aa513e7a513b742
URL: https://github.com/llvm/llvm-project/commit/7abf7dd5efe257b5e7ff72199aa513e7a513b742
DIFF: https://github.com/llvm/llvm-project/commit/7abf7dd5efe257b5e7ff72199aa513e7a513b742.diff
LOG: [AArch64] Add combine for add(udot(0, x, y), z) -> udot(z, x, y).
Given a zero accumulator input for a udot, a following add can be folded
in to take the place of that input, using the addition that the
instruction naturally performs.
Differential Revision: https://reviews.llvm.org/D97188
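As an illustration (not part of the commit), the fold applies to source
patterns like the minimal sketch below. It assumes <arm_neon.h>, a target
with the dotprod feature, and a hypothetical function name; before this
change the zero accumulator produced a movi/udot/add sequence (see the old
CHECK lines in the tests below), afterwards the add is folded into the
udot's accumulator operand.

  #include <arm_neon.h>

  // udot with a zero accumulator followed by a separate vector add;
  // the new combine rewrites add(udot(0, b, c), acc) into udot(acc, b, c).
  uint32x2_t dot_then_add(uint32x2_t acc, uint8x8_t b, uint8x8_t c) {
    uint32x2_t zero = vdup_n_u32(0);
    uint32x2_t d = vdot_u32(zero, b, c); // dot product into a zero vector
    return vadd_u32(d, acc);             // this add is absorbed by the udot
  }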
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/neon-dot-product.ll
llvm/test/CodeGen/AArch64/neon-dotreduce.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2e8752f609a5..3cc809eb255c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13217,6 +13217,29 @@ static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, DL, MVT::i64));
}
+// ADD(UDOT(zero, x, y), A) --> UDOT(A, x, y)
+static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+ if (N->getOpcode() != ISD::ADD)
+ return SDValue();
+
+ SDValue Dot = N->getOperand(0);
+ SDValue A = N->getOperand(1);
+ // Handle commutativity
+ auto isZeroDot = [](SDValue Dot) {
+ return (Dot.getOpcode() == AArch64ISD::UDOT ||
+ Dot.getOpcode() == AArch64ISD::SDOT) &&
+ ISD::isBuildVectorAllZeros(Dot.getOperand(0).getNode());
+ };
+ if (!isZeroDot(Dot))
+ std::swap(Dot, A);
+ if (!isZeroDot(Dot))
+ return SDValue();
+
+ return DAG.getNode(Dot.getOpcode(), SDLoc(N), VT, A, Dot.getOperand(1),
+ Dot.getOperand(2));
+}
+
// The basic add/sub long vector instructions have variants with "2" on the end
// which act on the high-half of their inputs. They are normally matched by
// patterns like:
@@ -13276,6 +13299,8 @@ static SDValue performAddSubCombine(SDNode *N,
// Try to change sum of two reductions.
if (SDValue Val = performUADDVCombine(N, DAG))
return Val;
+ if (SDValue Val = performAddDotCombine(N, DAG))
+ return Val;
return performAddSubLongCombine(N, DCI, DAG);
}
diff --git a/llvm/test/CodeGen/AArch64/neon-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
index 1d82f93b32d1..6c76d4e90e2d 100644
--- a/llvm/test/CodeGen/AArch64/neon-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dot-product.ll
@@ -55,9 +55,7 @@ entry:
define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
; CHECK-LABEL: test_vdot_u32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: udot v3.2s, v1.8b, v2.8b
-; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT: udot v0.2s, v1.8b, v2.8b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
@@ -68,9 +66,7 @@ entry:
define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
; CHECK-LABEL: test_vdotq_u32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: udot v3.4s, v1.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: udot v0.4s, v1.16b, v2.16b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
@@ -81,9 +77,7 @@ entry:
define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 {
; CHECK-LABEL: test_vdot_s32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: sdot v3.2s, v1.8b, v2.8b
-; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT: sdot v0.2s, v1.8b, v2.8b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
@@ -94,9 +88,7 @@ entry:
define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 {
; CHECK-LABEL: test_vdotq_s32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: sdot v3.4s, v1.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: sdot v0.4s, v1.16b, v2.16b
; CHECK-NEXT: ret
entry:
%vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
@@ -161,6 +153,11 @@ entry:
define <2 x i32> @test_vdot_lane_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdot_lane_u32_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -171,6 +168,11 @@ entry:
}
define <4 x i32> @test_vdotq_lane_u32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdotq_lane_u32_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -183,9 +185,7 @@ entry:
define <2 x i32> @test_vdot_laneq_u32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdot_laneq_u32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: udot v3.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@@ -199,9 +199,7 @@ entry:
define <4 x i32> @test_vdotq_laneq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdotq_laneq_u32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: udot v3.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@@ -269,6 +267,11 @@ entry:
define <2 x i32> @test_vdot_lane_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdot_lane_s32_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
+; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
@@ -279,6 +282,11 @@ entry:
}
define <4 x i32> @test_vdotq_lane_s32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK-LABEL: test_vdotq_lane_s32_zero:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
+; CHECK-NEXT: ret
entry:
%.cast = bitcast <8 x i8> %c to <2 x i32>
%shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -291,9 +299,7 @@ entry:
define <2 x i32> @test_vdot_laneq_s32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdot_laneq_s32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: sdot v3.2s, v1.8b, v2.4b[1]
-; CHECK-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
@@ -307,9 +313,7 @@ entry:
define <4 x i32> @test_vdotq_laneq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) {
; CHECK-LABEL: test_vdotq_laneq_s32_zero:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: sdot v3.4s, v1.16b, v2.4b[1]
-; CHECK-NEXT: add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1]
; CHECK-NEXT: ret
entry:
%.cast = bitcast <16 x i8> %c to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 2b399f23376b..55bfd060c360 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -273,11 +273,9 @@ define i32 @test_udot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.16b, #1
; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: movi v4.2d, #0000000000000000
-; CHECK-NEXT: udot v4.4s, v1.16b, v0.16b
; CHECK-NEXT: udot v3.4s, v1.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v4.4s, v3.4s
-; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: udot v3.4s, v1.16b, v0.16b
+; CHECK-NEXT: addv s0, v3.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry:
@@ -390,11 +388,9 @@ define i32 @test_sdot_v16i8_double_nomla(<16 x i8> %a, <16 x i8> %b, <16 x i8> %
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v1.16b, #1
; CHECK-NEXT: movi v3.2d, #0000000000000000
-; CHECK-NEXT: movi v4.2d, #0000000000000000
-; CHECK-NEXT: sdot v4.4s, v1.16b, v0.16b
; CHECK-NEXT: sdot v3.4s, v1.16b, v2.16b
-; CHECK-NEXT: add v0.4s, v4.4s, v3.4s
-; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: sdot v3.4s, v1.16b, v0.16b
+; CHECK-NEXT: addv s0, v3.4s
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
entry: