[llvm] 54d8627 - [AArch64] Redundant masks in downcast long multiply

Nicholas Guy via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 3 02:12:35 PST 2020


Author: Nicholas Guy
Date: 2020-11-03T10:12:28Z
New Revision: 54d8627852a60e648022392ca316175911025eb9

URL: https://github.com/llvm/llvm-project/commit/54d8627852a60e648022392ca316175911025eb9
DIFF: https://github.com/llvm/llvm-project/commit/54d8627852a60e648022392ca316175911025eb9.diff

LOG: [AArch64] Redundant masks in downcast long multiply

Adds patterns to catch masks preceding a long multiply,
and generate a single umull/smull instruction instead.

Differential revision: https://reviews.llvm.org/D89956

Added: 
    llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index c69d840c3b03..52608a6249c8 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -1475,8 +1475,16 @@ def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
 
+def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext_inreg GPR64:$Rm, i32))),
+          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
+def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (sext GPR32:$Rm))),
+          (SMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
           (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (and GPR64:$Rm, 0xFFFFFFFF))),
+          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), (EXTRACT_SUBREG $Rm, sub_32), XZR)>;
+def : Pat<(i64 (mul (and GPR64:$Rn, 0xFFFFFFFF), (zext GPR32:$Rm))),
+          (UMADDLrrr (EXTRACT_SUBREG $Rn, sub_32), $Rm, XZR)>;
 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
           (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
 

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
new file mode 100644
index 000000000000..a10063d9e0c6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o -| FileCheck %s
+
+define i64 @umull(i64 %x0, i64 %x1) {
+; CHECK-LABEL: umull:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull x0, w1, w0
+; CHECK-NEXT:    ret
+entry:
+  %and = and i64 %x0, 4294967295
+  %and1 = and i64 %x1, 4294967295
+  %mul = mul nuw i64 %and1, %and
+  ret i64 %mul
+}
+
+define i64 @umull2(i64 %x, i32 %y) {
+; CHECK-LABEL: umull2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull x0, w0, w1
+; CHECK-NEXT:    ret
+entry:
+  %and = and i64 %x, 4294967295
+  %conv = zext i32 %y to i64
+  %mul = mul nuw nsw i64 %and, %conv
+  ret i64 %mul
+}
+
+define i64 @umull2_commuted(i64 %x, i32 %y) {
+; CHECK-LABEL: umull2_commuted:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umull x0, w0, w1
+; CHECK-NEXT:    ret
+entry:
+  %and = and i64 %x, 4294967295
+  %conv = zext i32 %y to i64
+  %mul = mul nuw nsw i64 %conv, %and
+  ret i64 %mul
+}
+
+define i64 @smull(i64 %x0, i64 %x1) {
+; CHECK-LABEL: smull:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull x0, w1, w0
+; CHECK-NEXT:    ret
+entry:
+  %sext = shl i64 %x0, 32
+  %conv1 = ashr exact i64 %sext, 32
+  %sext4 = shl i64 %x1, 32
+  %conv3 = ashr exact i64 %sext4, 32
+  %mul = mul nsw i64 %conv3, %conv1
+  ret i64 %mul
+}
+
+define i64 @smull2(i64 %x, i32 %y) {
+; CHECK-LABEL: smull2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull x0, w0, w1
+; CHECK-NEXT:    ret
+entry:
+  %shl = shl i64 %x, 32
+  %shr = ashr exact i64 %shl, 32
+  %conv = sext i32 %y to i64
+  %mul = mul nsw i64 %shr, %conv
+  ret i64 %mul
+}
+
+define i64 @smull2_commuted(i64 %x, i32 %y) {
+; CHECK-LABEL: smull2_commuted:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smull x0, w0, w1
+; CHECK-NEXT:    ret
+entry:
+  %shl = shl i64 %x, 32
+  %shr = ashr exact i64 %shl, 32
+  %conv = sext i32 %y to i64
+  %mul = mul nsw i64 %conv, %shr
+  ret i64 %mul
+}


        


More information about the llvm-commits mailing list