[llvm] f9e0390 - [AArch64] Select SMULL for zero extended vectors when top bit is zero
Zain Jaffal via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 7 23:06:34 PST 2022
Author: Zain Jaffal
Date: 2022-12-08T09:06:18+02:00
New Revision: f9e0390751cb5eefbbbc191f851c52422acacab1
URL: https://github.com/llvm/llvm-project/commit/f9e0390751cb5eefbbbc191f851c52422acacab1
DIFF: https://github.com/llvm/llvm-project/commit/f9e0390751cb5eefbbbc191f851c52422acacab1.diff
LOG: [AArch64] Select SMULL for zero extended vectors when top bit is zero
We can safely replace a `zext` instruction with a `sext` if the top bit is known to be zero. This is useful because it lets us select `smull` when both operands are sign extended.
Reviewed By: fhahn, dmgreen
Differential Revision: https://reviews.llvm.org/D134711
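For context, a minimal IR sketch of the pattern this enables, modelled on the `smull_zext_and_v2i32_v2i64` test updated below (the function name is illustrative, not part of the patch). The `and` clears the sign bit of the operand that is later zero extended, so the `zext` can be treated as a `sext` and the widening multiply can lower to `smull` instead of separate extends and a scalarized multiply:

  define <2 x i64> @mul_sext_zext_topbit_clear(<2 x i32> %a, <2 x i32> %b) {
    ; mask keeps the sign bit of %b zero, so SignBitIsZero holds for %b.masked
    %b.masked = and <2 x i32> %b, <i32 2147483647, i32 2147483647>
    %a.ext = sext <2 x i32> %a to <2 x i64>
    %b.ext = zext <2 x i32> %b.masked to <2 x i64>
    %mul = mul <2 x i64> %a.ext, %b.ext
    ret <2 x i64> %mul
  }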
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/aarch64-smull.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2ad7d8e6763af..ef5c70b6bbaca 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4528,7 +4528,7 @@ SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
}
static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
- bool &IsMLA) {
+ SDLoc DL, bool &IsMLA) {
bool IsN0SExt = isSignExtended(N0, DAG);
bool IsN1SExt = isSignExtended(N1, DAG);
if (IsN0SExt && IsN1SExt)
@@ -4540,6 +4540,23 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
if (IsN0ZExt && IsN1ZExt)
return AArch64ISD::UMULL;
+ // Select SMULL if we can replace zext with sext.
+ if ((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) {
+ SDValue ZextOperand;
+ if (IsN0ZExt)
+ ZextOperand = N0->getOperand(0);
+ else
+ ZextOperand = N1->getOperand(0);
+ if (DAG.SignBitIsZero(ZextOperand)) {
+ SDNode *NewSext =
+ DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
+ if (IsN0ZExt)
+ N0 = NewSext;
+ else
+ N1 = NewSext;
+ return AArch64ISD::SMULL;
+ }
+ }
if (!IsN1SExt && !IsN1ZExt)
return 0;
// Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
@@ -4577,7 +4594,8 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
SDNode *N0 = Op.getOperand(0).getNode();
SDNode *N1 = Op.getOperand(1).getNode();
bool isMLA = false;
- unsigned NewOpc = selectUmullSmull(N0, N1, DAG, isMLA);
+ SDLoc DL(Op);
+ unsigned NewOpc = selectUmullSmull(N0, N1, DAG, DL, isMLA);
if (!NewOpc) {
if (VT == MVT::v2i64)
@@ -4589,7 +4607,6 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
}
// Legalize to a S/UMULL instruction
- SDLoc DL(Op);
SDValue Op0;
SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
if (!isMLA) {
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index bdc156f8b05db..59d39458d4c58 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -50,14 +50,10 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: smull_zext_v8i8_v8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
-; CHECK-NEXT: ldr q1, [x1]
+; CHECK-NEXT: ldr q2, [x1]
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: sshll v2.4s, v1.4h, #0
-; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-NEXT: ushll2 v3.4s, v0.8h, #0
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: mul v1.4s, v3.4s, v1.4s
-; CHECK-NEXT: mul v0.4s, v0.4s, v2.4s
+; CHECK-NEXT: smull2 v1.4s, v0.8h, v2.8h
+; CHECK-NEXT: smull v0.4s, v0.4h, v2.4h
; CHECK-NEXT: ret
%load.A = load <8 x i8>, <8 x i8>* %A
%load.B = load <8 x i16>, <8 x i16>* %B
@@ -70,15 +66,11 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: smull_zext_v8i8_v8i32_sext_first_operand:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: ldr q0, [x0]
-; CHECK-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-NEXT: ushll2 v3.4s, v1.8h, #0
-; CHECK-NEXT: ushll v4.4s, v1.4h, #0
-; CHECK-NEXT: mul v1.4s, v0.4s, v3.4s
-; CHECK-NEXT: mul v0.4s, v2.4s, v4.4s
+; CHECK-NEXT: ldr d0, [x1]
+; CHECK-NEXT: ldr q2, [x0]
+; CHECK-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEXT: smull2 v1.4s, v2.8h, v0.8h
+; CHECK-NEXT: smull v0.4s, v2.4h, v0.4h
; CHECK-NEXT: ret
%load.A = load <8 x i16>, <8 x i16>* %A
%load.B = load <8 x i8>, <8 x i8>* %B
@@ -116,9 +108,7 @@ define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: sshll v1.4s, v1.4h, #0
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
+; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
; CHECK-NEXT: ret
%load.A = load <4 x i8>, <4 x i8>* %A
%load.B = load <4 x i16>, <4 x i16>* %B
@@ -156,16 +146,7 @@ define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounw
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: bic v0.2s, #128, lsl #24
-; CHECK-NEXT: sshll v1.2d, v1.2s, #0
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: fmov x9, d1
-; CHECK-NEXT: fmov x10, d0
-; CHECK-NEXT: mov x8, v1.d[1]
-; CHECK-NEXT: mov x11, v0.d[1]
-; CHECK-NEXT: smull x9, w10, w9
-; CHECK-NEXT: smull x8, w11, w8
-; CHECK-NEXT: fmov d0, x9
-; CHECK-NEXT: mov v0.d[1], x8
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s
; CHECK-NEXT: ret
%load.A = load <2 x i32>, <2 x i32>* %A
%and.A = and <2 x i32> %load.A, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>