[llvm] c07a01c - Revert "[AArch64] Select SMULL for zero extended vectors when top bit is zero"
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 8 02:29:18 PST 2022
Author: Florian Hahn
Date: 2022-12-08T10:28:33Z
New Revision: c07a01c2bb0d1f95689f809fd5be23829e364393
URL: https://github.com/llvm/llvm-project/commit/c07a01c2bb0d1f95689f809fd5be23829e364393
DIFF: https://github.com/llvm/llvm-project/commit/c07a01c2bb0d1f95689f809fd5be23829e364393.diff
LOG: Revert "[AArch64] Select SMULL for zero extended vectors when top bit is zero"
This reverts commit f9e0390751cb5eefbbbc191f851c52422acacab1.
The patch causes a crash for the IR below:
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx"
define void @test(ptr %data, <8 x i16> %v) {
entry:
  %0 = sext <8 x i16> %v to <8 x i32>
  %1 = mul <8 x i32> %0, <i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584>
  %2 = lshr <8 x i32> %1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %3 = trunc <8 x i32> %2 to <8 x i16>
  store <8 x i16> %3, ptr %data, align 2
  ret void
}
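The target triple is embedded in the module, so the crash should reproduce by feeding the IR above straight to llc (assuming it is saved as crash.ll; the file name is illustrative):

  llc crash.ll -o /dev/null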
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/aarch64-smull.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ef5c70b6bbac..2ad7d8e6763a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4528,7 +4528,7 @@ SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
}
static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
- SDLoc DL, bool &IsMLA) {
+ bool &IsMLA) {
bool IsN0SExt = isSignExtended(N0, DAG);
bool IsN1SExt = isSignExtended(N1, DAG);
if (IsN0SExt && IsN1SExt)
@@ -4540,23 +4540,6 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
if (IsN0ZExt && IsN1ZExt)
return AArch64ISD::UMULL;
- // Select SMULL if we can replace zext with sext.
- if ((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) {
- SDValue ZextOperand;
- if (IsN0ZExt)
- ZextOperand = N0->getOperand(0);
- else
- ZextOperand = N1->getOperand(0);
- if (DAG.SignBitIsZero(ZextOperand)) {
- SDNode *NewSext =
- DAG.getSExtOrTrunc(ZextOperand, DL, N0->getValueType(0)).getNode();
- if (IsN0ZExt)
- N0 = NewSext;
- else
- N1 = NewSext;
- return AArch64ISD::SMULL;
- }
- }
if (!IsN1SExt && !IsN1ZExt)
return 0;
// Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
@@ -4594,8 +4577,7 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
SDNode *N0 = Op.getOperand(0).getNode();
SDNode *N1 = Op.getOperand(1).getNode();
bool isMLA = false;
- SDLoc DL(Op);
- unsigned NewOpc = selectUmullSmull(N0, N1, DAG, DL, isMLA);
+ unsigned NewOpc = selectUmullSmull(N0, N1, DAG, isMLA);
if (!NewOpc) {
if (VT == MVT::v2i64)
@@ -4607,6 +4589,7 @@ SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
}
// Legalize to a S/UMULL instruction
+ SDLoc DL(Op);
SDValue Op0;
SDValue Op1 = skipExtensionForVectorMULL(N1, DAG);
if (!isMLA) {
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 59d39458d4c5..bdc156f8b05d 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -50,10 +50,14 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: smull_zext_v8i8_v8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr d0, [x0]
-; CHECK-NEXT: ldr q2, [x1]
+; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull2 v1.4s, v0.8h, v2.8h
-; CHECK-NEXT: smull v0.4s, v0.4h, v2.4h
+; CHECK-NEXT: sshll v2.4s, v1.4h, #0
+; CHECK-NEXT: sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT: ushll2 v3.4s, v0.8h, #0
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: mul v1.4s, v3.4s, v1.4s
+; CHECK-NEXT: mul v0.4s, v0.4s, v2.4s
; CHECK-NEXT: ret
%load.A = load <8 x i8>, <8 x i8>* %A
%load.B = load <8 x i16>, <8 x i16>* %B
@@ -66,11 +70,15 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: smull_zext_v8i8_v8i32_sext_first_operand:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldr d0, [x1]
-; CHECK-NEXT: ldr q2, [x0]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull2 v1.4s, v2.8h, v0.8h
-; CHECK-NEXT: smull v0.4s, v2.4h, v0.4h
+; CHECK-NEXT: ldr d1, [x1]
+; CHECK-NEXT: ldr q0, [x0]
+; CHECK-NEXT: ushll v1.8h, v1.8b, #0
+; CHECK-NEXT: sshll v2.4s, v0.4h, #0
+; CHECK-NEXT: sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT: ushll2 v3.4s, v1.8h, #0
+; CHECK-NEXT: ushll v4.4s, v1.4h, #0
+; CHECK-NEXT: mul v1.4s, v0.4s, v3.4s
+; CHECK-NEXT: mul v0.4s, v2.4s, v4.4s
; CHECK-NEXT: ret
%load.A = load <8 x i16>, <8 x i16>* %A
%load.B = load <8 x i8>, <8 x i8>* %B
@@ -108,7 +116,9 @@ define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
%load.A = load <4 x i8>, <4 x i8>* %A
%load.B = load <4 x i16>, <4 x i16>* %B
@@ -146,7 +156,16 @@ define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounw
; CHECK-NEXT: ldr d0, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: bic v0.2s, #128, lsl #24
-; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT: sshll v1.2d, v1.2s, #0
+; CHECK-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-NEXT: fmov x9, d1
+; CHECK-NEXT: fmov x10, d0
+; CHECK-NEXT: mov x8, v1.d[1]
+; CHECK-NEXT: mov x11, v0.d[1]
+; CHECK-NEXT: smull x9, w10, w9
+; CHECK-NEXT: smull x8, w11, w8
+; CHECK-NEXT: fmov d0, x9
+; CHECK-NEXT: mov v0.d[1], x8
; CHECK-NEXT: ret
%load.A = load <2 x i32>, <2 x i32>* %A
%and.A = and <2 x i32> %load.A, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>
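For context, the reverted transform rewrote a widening multiply of one sign-extended and one zero-extended operand as AArch64ISD::SMULL whenever DAG.SignBitIsZero proved the zero-extended value non-negative, in which case zext and sext agree. A minimal sketch of the kind of input it targeted, modelled on the smull_zext_and_v2i32_v2i64 test above (function name and mask values are illustrative, not taken from the patch):

define <2 x i64> @mixed_ext_mul(<2 x i32> %a, <2 x i32> %b) {
  ; Clearing the top bit makes %a.masked provably non-negative,
  ; so its zext is equivalent to a sext and smull is a valid lowering.
  %a.masked = and <2 x i32> %a, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>
  %a.wide = zext <2 x i32> %a.masked to <2 x i64>
  %b.wide = sext <2 x i32> %b to <2 x i64>
  %mul = mul <2 x i64> %a.wide, %b.wide
  ret <2 x i64> %mul
}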