[llvm] 1d27f26 - [LoongArch] Add codegen support for multiplication operations

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 8 02:21:50 PDT 2022


Author: Weining Lu
Date: 2022-07-08T17:15:17+08:00
New Revision: 1d27f26426c7389a260e4cf9344b11035c413b01

URL: https://github.com/llvm/llvm-project/commit/1d27f26426c7389a260e4cf9344b11035c413b01
DIFF: https://github.com/llvm/llvm-project/commit/1d27f26426c7389a260e4cf9344b11035c413b01.diff

LOG: [LoongArch] Add codegen support for multiplication operations

Reference:
https://llvm.org/docs/LangRef.html#mul-instruction

Differential Revision: https://reviews.llvm.org/D128194
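
As a minimal illustration (mirroring the new mul.ll test added below), a plain
i32 'mul' is now selected to mul.w on loongarch32 and to mul.d on loongarch64:

    define i32 @mul_i32(i32 %a, i32 %b) {
    entry:
      %r = mul i32 %a, %b    ; mul.w on LA32, mul.d on LA64
      ret i32 %r
    }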

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a8f95a0e62db..41ccbdfdb86d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -75,6 +75,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::BR_CC, GRLenVT, Expand);
   setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
+
+  if (!Subtarget.is64Bit())
+    setLibcallName(RTLIB::MUL_I128, nullptr);
 
   // Compute derived properties from the register classes.
   computeRegisterProperties(STI.getRegisterInfo());

diff  --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 57130f169b8f..db5e0b59f83c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -605,6 +605,9 @@ def : PatGprGpr<sdiv, DIV_W>;
 def : PatGprGpr<udiv, DIV_WU>;
 def : PatGprGpr<srem, MOD_W>;
 def : PatGprGpr<urem, MOD_WU>;
+def : PatGprGpr<mul, MUL_W>;
+def : PatGprGpr<mulhs, MULH_W>;
+def : PatGprGpr<mulhu, MULH_WU>;
 } // Predicates = [IsLA32]
 
 let Predicates = [IsLA64] in {
@@ -618,6 +621,20 @@ def : PatGprGpr<sdiv, DIV_D>;
 def : PatGprGpr<udiv, DIV_DU>;
 def : PatGprGpr<srem, MOD_D>;
 def : PatGprGpr<urem, MOD_DU>;
+// TODO: Select "_W[U]" instructions for i32xi32 if only the lower 32 bits of
+// the product are used.
+def : PatGprGpr<mul, MUL_D>;
+def : PatGprGpr<mulhs, MULH_D>;
+def : PatGprGpr<mulhu, MULH_DU>;
+// Select MULW_D_W to compute the full 64-bit product of i32xi32 signed
+// multiplication.
+def : Pat<(i64 (mul (sext_inreg GPR:$rj, i32), (sext_inreg GPR:$rk, i32))),
+          (MULW_D_W GPR:$rj, GPR:$rk)>;
+// Select MULW_D_WU to compute the full 64-bit product of i32xi32
+// unsigned multiplication.
+def : Pat<(i64 (mul (loongarch_bstrpick GPR:$rj, (i64 31), (i64 0)),
+                    (loongarch_bstrpick GPR:$rk, (i64 31), (i64 0)))),
+          (MULW_D_WU GPR:$rj, GPR:$rk)>;
 } // Predicates = [IsLA64]
 
 def : PatGprGpr<and, AND>;

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
new file mode 100644
index 000000000000..0d31e790cf72
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -0,0 +1,287 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'mul' LLVM IR: https://llvm.org/docs/LangRef.html#mul-instruction
+
+define i1 @mul_i1(i1 %a, i1 %b) {
+; LA32-LABEL: mul_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = mul i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @mul_i8(i8 %a, i8 %b) {
+; LA32-LABEL: mul_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = mul i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @mul_i16(i16 %a, i16 %b) {
+; LA32-LABEL: mul_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = mul i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @mul_i32(i32 %a, i32 %b) {
+; LA32-LABEL: mul_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = mul i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @mul_i64(i64 %a, i64 %b) {
+; LA32-LABEL: mul_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mul.w $a3, $a0, $a3
+; LA32-NEXT:    mulh.wu $a4, $a0, $a2
+; LA32-NEXT:    add.w $a3, $a4, $a3
+; LA32-NEXT:    mul.w $a1, $a1, $a2
+; LA32-NEXT:    add.w $a1, $a3, $a1
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = mul i64 %a, %b
+  ret i64 %r
+}
+
+define i64 @mul_pow2(i64 %a) {
+; LA32-LABEL: mul_pow2:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a1, 3
+; LA32-NEXT:    srli.w $a2, $a0, 29
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_pow2:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = mul i64 %a, 8
+  ret i64 %1
+}
+
+define i64 @mul_p5(i64 %a) {
+; LA32-LABEL: mul_p5:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ori $a2, $zero, 5
+; LA32-NEXT:    mul.w $a1, $a1, $a2
+; LA32-NEXT:    mulh.wu $a3, $a0, $a2
+; LA32-NEXT:    add.w $a1, $a3, $a1
+; LA32-NEXT:    mul.w $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mul_p5:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ori $a1, $zero, 5
+; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = mul i64 %a, 5
+  ret i64 %1
+}
+
+define i32 @mulh_w(i32 %a, i32 %b) {
+; LA32-LABEL: mulh_w:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mulh.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulh_w:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulw.d.w $a0, $a0, $a1
+; LA64-NEXT:    srli.d $a0, $a0, 32
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = sext i32 %a to i64
+  %2 = sext i32 %b to i64
+  %3 = mul i64 %1, %2
+  %4 = lshr i64 %3, 32
+  %5 = trunc i64 %4 to i32
+  ret i32 %5
+}
+
+define i32 @mulh_wu(i32 %a, i32 %b) {
+; LA32-LABEL: mulh_wu:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mulh.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulh_wu:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulw.d.wu $a0, $a0, $a1
+; LA64-NEXT:    srli.d $a0, $a0, 32
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = zext i32 %a to i64
+  %2 = zext i32 %b to i64
+  %3 = mul i64 %1, %2
+  %4 = lshr i64 %3, 32
+  %5 = trunc i64 %4 to i32
+  ret i32 %5
+}
+
+define i64 @mulh_d(i64 %a, i64 %b) {
+; LA32-LABEL: mulh_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mulh.wu $a4, $a0, $a2
+; LA32-NEXT:    mul.w $a5, $a1, $a2
+; LA32-NEXT:    add.w $a4, $a5, $a4
+; LA32-NEXT:    sltu $a5, $a4, $a5
+; LA32-NEXT:    mulh.wu $a6, $a1, $a2
+; LA32-NEXT:    add.w $a5, $a6, $a5
+; LA32-NEXT:    mul.w $a6, $a0, $a3
+; LA32-NEXT:    add.w $a4, $a6, $a4
+; LA32-NEXT:    sltu $a4, $a4, $a6
+; LA32-NEXT:    mulh.wu $a6, $a0, $a3
+; LA32-NEXT:    add.w $a4, $a6, $a4
+; LA32-NEXT:    add.w $a4, $a5, $a4
+; LA32-NEXT:    sltu $a5, $a4, $a5
+; LA32-NEXT:    mulh.wu $a6, $a1, $a3
+; LA32-NEXT:    add.w $a5, $a6, $a5
+; LA32-NEXT:    mul.w $a6, $a1, $a3
+; LA32-NEXT:    add.w $a4, $a6, $a4
+; LA32-NEXT:    sltu $a6, $a4, $a6
+; LA32-NEXT:    add.w $a5, $a5, $a6
+; LA32-NEXT:    srai.w $a6, $a1, 31
+; LA32-NEXT:    mul.w $a7, $a2, $a6
+; LA32-NEXT:    mulh.wu $a2, $a2, $a6
+; LA32-NEXT:    add.w $a2, $a2, $a7
+; LA32-NEXT:    mul.w $a6, $a3, $a6
+; LA32-NEXT:    add.w $a2, $a2, $a6
+; LA32-NEXT:    srai.w $a3, $a3, 31
+; LA32-NEXT:    mul.w $a1, $a3, $a1
+; LA32-NEXT:    mulh.wu $a6, $a3, $a0
+; LA32-NEXT:    add.w $a1, $a6, $a1
+; LA32-NEXT:    mul.w $a0, $a3, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    add.w $a2, $a0, $a7
+; LA32-NEXT:    sltu $a0, $a2, $a0
+; LA32-NEXT:    add.w $a0, $a1, $a0
+; LA32-NEXT:    add.w $a1, $a5, $a0
+; LA32-NEXT:    add.w $a0, $a4, $a2
+; LA32-NEXT:    sltu $a2, $a0, $a4
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulh_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulh.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = sext i64 %a to i128
+  %2 = sext i64 %b to i128
+  %3 = mul i128 %1, %2
+  %4 = lshr i128 %3, 64
+  %5 = trunc i128 %4 to i64
+  ret i64 %5
+}
+
+define i64 @mulh_du(i64 %a, i64 %b) {
+; LA32-LABEL: mulh_du:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mulh.wu $a4, $a0, $a2
+; LA32-NEXT:    mul.w $a5, $a1, $a2
+; LA32-NEXT:    add.w $a4, $a5, $a4
+; LA32-NEXT:    sltu $a5, $a4, $a5
+; LA32-NEXT:    mulh.wu $a2, $a1, $a2
+; LA32-NEXT:    add.w $a2, $a2, $a5
+; LA32-NEXT:    mul.w $a5, $a0, $a3
+; LA32-NEXT:    add.w $a4, $a5, $a4
+; LA32-NEXT:    sltu $a4, $a4, $a5
+; LA32-NEXT:    mulh.wu $a0, $a0, $a3
+; LA32-NEXT:    add.w $a0, $a0, $a4
+; LA32-NEXT:    mul.w $a4, $a1, $a3
+; LA32-NEXT:    mulh.wu $a1, $a1, $a3
+; LA32-NEXT:    add.w $a0, $a2, $a0
+; LA32-NEXT:    sltu $a2, $a0, $a2
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    add.w $a0, $a4, $a0
+; LA32-NEXT:    sltu $a2, $a0, $a4
+; LA32-NEXT:    add.w $a1, $a1, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulh_du:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulh.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = zext i64 %a to i128
+  %2 = zext i64 %b to i128
+  %3 = mul i128 %1, %2
+  %4 = lshr i128 %3, 64
+  %5 = trunc i128 %4 to i64
+  ret i64 %5
+}
+
+define i64 @mulw_d_w(i32 %a, i32 %b) {
+; LA32-LABEL: mulw_d_w:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mul.w $a2, $a0, $a1
+; LA32-NEXT:    mulh.w $a1, $a0, $a1
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulw_d_w:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulw.d.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = sext i32 %a to i64
+  %2 = sext i32 %b to i64
+  %3 = mul i64 %1, %2
+  ret i64 %3
+}
+
+define i64 @mulw_d_wu(i32 %a, i32 %b) {
+; LA32-LABEL: mulw_d_wu:
+; LA32:       # %bb.0:
+; LA32-NEXT:    mul.w $a2, $a0, $a1
+; LA32-NEXT:    mulh.wu $a1, $a0, $a1
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: mulw_d_wu:
+; LA64:       # %bb.0:
+; LA64-NEXT:    mulw.d.wu $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = zext i32 %a to i64
+  %2 = zext i32 %b to i64
+  %3 = mul i64 %1, %2
+  ret i64 %3
+}
