[llvm] 89e53b2 - [LoongArch] Optimize multiplication with immediates

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 11 21:00:16 PDT 2023


Author: Ben Shi
Date: 2023-04-12T11:52:56+08:00
New Revision: 89e53b2aed0928354b42515fba1f2f585137184b

URL: https://github.com/llvm/llvm-project/commit/89e53b2aed0928354b42515fba1f2f585137184b
DIFF: https://github.com/llvm/llvm-project/commit/89e53b2aed0928354b42515fba1f2f585137184b.diff

LOG: [LoongArch] Optimize multiplication with immediates

Optimize (mul x, imm) to (ADD (SLLI x, s0), (SLLI x, s1)) or
(SUB (SLLI x, s0), (SLLI x, s1)) if possible.

Reviewed By: SixWeining

Differential Revision: https://reviews.llvm.org/D147692

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 7f62a12e2b9d4..901ec39d05a04 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -3165,6 +3165,27 @@ bool LoongArchTargetLowering::decomposeMulByConstant(LLVMContext &Context,
         ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
          (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
       return true;
+    // Break (MUL x, imm) into (ADD (SLLI x, s0), (SLLI x, s1)),
+    // in which the immediate has two set bits. Or break (MUL x, imm)
+    // into (SUB (SLLI x, s0), (SLLI x, s1)), in which the immediate
+    // equals (1 << s0) - (1 << s1).
+    if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
+      unsigned Shifts = Imm.countr_zero();
+      // Reject immediates which can be composed via a single LUI.
+      if (Shifts >= 12)
+        return false;
+      // Reject multiplications that can be optimized to
+      // (SLLI (ALSL x, x, 1/2/3/4), s).
+      APInt ImmPop = Imm.ashr(Shifts);
+      if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
+        return false;
+      // We do not consider the case `(-Imm - ImmSmall).isPowerOf2()`,
+      // since it needs one more instruction than the other 3 cases.
+      APInt ImmSmall = APInt(Imm.getBitWidth(), 1 << Shifts, true);
+      if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
+          (ImmSmall - Imm).isPowerOf2())
+        return true;
+    }
   }
 
   return false;

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
index 988ead244bc55..53a3529219fef 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -1308,17 +1308,16 @@ define i64 @mul_i64_4352(i64 %a) {
 define signext i32 @mul_i32_65792(i32 %a) {
 ; LA32-LABEL: mul_i32_65792:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 16
-; LA32-NEXT:    ori $a1, $a1, 256
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 8
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    add.w $a0, $a0, $a1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_65792:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 16
-; LA64-NEXT:    ori $a1, $a1, 256
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 8
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    add.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 65792
   ret i32 %b
@@ -1327,17 +1326,16 @@ define signext i32 @mul_i32_65792(i32 %a) {
 define signext i32 @mul_i32_65280(i32 %a) {
 ; LA32-LABEL: mul_i32_65280:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, 15
-; LA32-NEXT:    ori $a1, $a1, 3840
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 8
+; LA32-NEXT:    slli.w $a0, $a0, 16
+; LA32-NEXT:    sub.w $a0, $a0, $a1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_65280:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 15
-; LA64-NEXT:    ori $a1, $a1, 3840
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 8
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    sub.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 65280
   ret i32 %b
@@ -1346,17 +1344,16 @@ define signext i32 @mul_i32_65280(i32 %a) {
 define signext i32 @mul_i32_minus_65280(i32 %a) {
 ; LA32-LABEL: mul_i32_minus_65280:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    lu12i.w $a1, -16
-; LA32-NEXT:    ori $a1, $a1, 256
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    slli.w $a1, $a0, 16
+; LA32-NEXT:    slli.w $a0, $a0, 8
+; LA32-NEXT:    sub.w $a0, $a0, $a1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_minus_65280:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, -16
-; LA64-NEXT:    ori $a1, $a1, 256
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    slli.d $a1, $a0, 16
+; LA64-NEXT:    slli.d $a0, $a0, 8
+; LA64-NEXT:    sub.w $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, -65280
   ret i32 %b
@@ -1375,9 +1372,9 @@ define i64 @mul_i64_65792(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_65792:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 16
-; LA64-NEXT:    ori $a1, $a1, 256
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 8
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    add.d $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 65792
   ret i64 %b
@@ -1396,9 +1393,9 @@ define i64 @mul_i64_65280(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_65280:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, 15
-; LA64-NEXT:    ori $a1, $a1, 3840
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 8
+; LA64-NEXT:    slli.d $a0, $a0, 16
+; LA64-NEXT:    sub.d $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 65280
   ret i64 %b
@@ -1418,9 +1415,9 @@ define i64 @mul_i64_minus_65280(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_minus_65280:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    lu12i.w $a1, -16
-; LA64-NEXT:    ori $a1, $a1, 256
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    slli.d $a1, $a0, 16
+; LA64-NEXT:    slli.d $a0, $a0, 8
+; LA64-NEXT:    sub.d $a0, $a0, $a1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, -65280
   ret i64 %b


        


More information about the llvm-commits mailing list