[llvm] ebae917 - Recommit "[AArch64] Select SMULL for zero extended vectors when top bit is zero"

Zain Jaffal via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 12 06:46:43 PST 2022


Author: Zain Jaffal
Date: 2022-12-12T14:45:54Z
New Revision: ebae917294e680132fe1a3bc586a332dd47de8d0

URL: https://github.com/llvm/llvm-project/commit/ebae917294e680132fe1a3bc586a332dd47de8d0
DIFF: https://github.com/llvm/llvm-project/commit/ebae917294e680132fe1a3bc586a332dd47de8d0.diff

LOG: Recommit "[AArch64] Select SMULL for zero extended vectors when top bit is zero"

This is a recommit of f9e0390751cb5eefbbbc191f851c52422acacab1
The previous commit failed to handle cases where the zero extended operand is an extended `BUILD_VECTOR`.
We don't replace the `zext` with a `sext` to select `smull` if either operand is an extended `BUILD_VECTOR`.

Original commit message:

We can safely replace a `zext` instruction with a `sext` if the top bit is zero. This is useful because we can select `smull` when both operands are sign extended.

Reviewed By: fhahn, dmgreen

Differential Revision: https://reviews.llvm.org/D134711

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/aarch64-smull.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ac55c284b919..af4b98e75a80 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4539,7 +4539,9 @@ static unsigned selectUmullSmull(SDNode *&N0, SDNode *&N1, SelectionDAG &DAG,
     return AArch64ISD::UMULL;
 
   // Select SMULL if we can replace zext with sext.
-  if ((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) {
+  if (((IsN0SExt && IsN1ZExt) || (IsN0ZExt && IsN1SExt)) &&
+      !isExtendedBUILD_VECTOR(N0, DAG, false) &&
+      !isExtendedBUILD_VECTOR(N1, DAG, false)) {
     SDValue ZextOperand;
     if (IsN0ZExt)
       ZextOperand = N0->getOperand(0);

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 59d39458d4c5..9ebbe18dc1dd 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -753,6 +753,26 @@ define i16 @smullWithInconsistentExtensions(<8 x i8> %x, <8 x i8> %y) {
   ret i16 %r
 }
 
+define <8 x i16> @smull_extended_vector_operand(<8 x i16> %v) {
+; CHECK-LABEL: smull_extended_vector_operand:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.4s, #139, lsl #8
+; CHECK-NEXT:    sshll v2.4s, v0.4h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    mul v2.4s, v2.4s, v1.4s
+; CHECK-NEXT:    mul v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    shrn v0.4h, v2.4s, #1
+; CHECK-NEXT:    shrn2 v0.8h, v1.4s, #1
+; CHECK-NEXT:    ret
+entry:
+%0 = sext <8 x i16> %v to <8 x i32>
+%1 = mul <8 x i32> %0, <i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584, i32 35584>
+%2 = lshr <8 x i32> %1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+%3 = trunc <8 x i32> %2 to <8 x i16>
+ret <8 x i16> %3
+
+}
+
 define void @distribute(<8 x i16>* %dst, <16 x i8>* %src, i32 %mul) nounwind {
 ; CHECK-LABEL: distribute:
 ; CHECK:       // %bb.0: // %entry


        


More information about the llvm-commits mailing list