[llvm] 734c213 - [LoongArch] Optimize multiplication with immediates

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 1 03:12:08 PDT 2023


Author: Ben Shi
Date: 2023-04-01T18:11:50+08:00
New Revision: 734c213004305bfca04b5906a3c591735f43a020

URL: https://github.com/llvm/llvm-project/commit/734c213004305bfca04b5906a3c591735f43a020
DIFF: https://github.com/llvm/llvm-project/commit/734c213004305bfca04b5906a3c591735f43a020.diff

LOG: [LoongArch] Optimize multiplication with immediates

Optimize multiplication by immediates of the form
1 + ((1 + (1 << i)) << j), with i, j in [1, 4], into a pair of
`alsl` instructions.
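
As a rough illustration, here is a minimal C sketch of the identity
these patterns rely on; the helper `alsl` below is hypothetical and
merely models the LoongArch shift-add semantics rd = (rj << sa) + rk:

  #include <assert.h>
  #include <stdint.h>

  /* Hypothetical model of the LoongArch `alsl` instruction:
     rd = (rj << sa) + rk. */
  static uint64_t alsl(uint64_t rj, uint64_t rk, unsigned sa) {
    return (rj << sa) + rk;
  }

  int main(void) {
    uint64_t x = 12345;
    /* 11 = 1 + ((1 + (1 << 2)) << 1), i.e. i = 2, j = 1. */
    assert(alsl(alsl(x, x, 2), x, 1) == x * 11);
    /* 273 = 1 + ((1 + (1 << 4)) << 4), i.e. i = 4, j = 4. */
    assert(alsl(alsl(x, x, 4), x, 4) == x * 273);
    return 0;
  }

For example, a multiply by 11 now lowers to
`alsl.w $a1, $a0, $a0, 2` followed by `alsl.w $a0, $a1, $a0, 1`,
as the updated tests below show.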

Reviewed By: xen0n, SixWeining

Differential Revision: https://reviews.llvm.org/D147305

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 343c070e32c64..011e62307eb15 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -859,6 +859,13 @@ class shiftopw<SDPatternOperator operator>
     : PatFrag<(ops node:$val, node:$count),
               (operator node:$val, (i64 (shiftMask32 node:$count)))>;
 
+def mul_const_oneuse : PatFrag<(ops node:$A, node:$B),
+                               (mul node:$A, node:$B), [{
+  if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+    return N1C->hasOneUse();
+  return false;
+}]>;
+
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
@@ -939,6 +946,31 @@ def : Pat<(sext_inreg (add GPR:$rj, (AddiPair:$im)), i32),
                   (AddiPairImmSmall AddiPair:$im))>;
 } // Predicates = [IsLA64]
 
+let Predicates = [IsLA32] in {
+foreach Idx0 = 1...4 in {
+  foreach Idx1 = 1...4 in {
+    defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
+    def : Pat<(mul_const_oneuse GPR:$r, (i32 CImm)),
+          (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i32 Idx0)),
+                  GPR:$r, (i32 Idx1))>;
+  }
+}
+} // Predicates = [IsLA32]
+
+let Predicates = [IsLA64] in {
+foreach Idx0 = 1...4 in {
+  foreach Idx1 = 1...4 in {
+    defvar CImm = !add(1, !shl(!add(1, !shl(1, Idx0)), Idx1));
+    def : Pat<(sext_inreg (mul_const_oneuse GPR:$r, (i64 CImm)), i32),
+              (ALSL_W (ALSL_W GPR:$r, GPR:$r, (i64 Idx0)),
+                      GPR:$r, (i64 Idx1))>;
+    def : Pat<(mul_const_oneuse GPR:$r, (i64 CImm)),
+              (ALSL_D (ALSL_D GPR:$r, GPR:$r, (i64 Idx0)),
+                      GPR:$r, (i64 Idx1))>;
+  }
+}
+} // Predicates = [IsLA64]
+
 foreach Idx = 1...7 in {
   defvar ShamtA = !mul(8, Idx);
   defvar ShamtB = !mul(8, !sub(8, Idx));

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
index 8ac4f3f781c6f..7be005f599532 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/mul.ll
@@ -288,15 +288,14 @@ define i64 @mulw_d_wu(i32 %a, i32 %b) {
 define signext i32 @mul_i32_11(i32 %a) {
 ; LA32-LABEL: mul_i32_11:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 11
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 11
   ret i32 %b
@@ -305,15 +304,14 @@ define signext i32 @mul_i32_11(i32 %a) {
 define signext i32 @mul_i32_13(i32 %a) {
 ; LA32-LABEL: mul_i32_13:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 13
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 13
   ret i32 %b
@@ -322,15 +320,14 @@ define signext i32 @mul_i32_13(i32 %a) {
 define signext i32 @mul_i32_19(i32 %a) {
 ; LA32-LABEL: mul_i32_19:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 19
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 19
   ret i32 %b
@@ -339,15 +336,14 @@ define signext i32 @mul_i32_19(i32 %a) {
 define signext i32 @mul_i32_21(i32 %a) {
 ; LA32-LABEL: mul_i32_21:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 21
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 21
   ret i32 %b
@@ -356,15 +352,14 @@ define signext i32 @mul_i32_21(i32 %a) {
 define signext i32 @mul_i32_25(i32 %a) {
 ; LA32-LABEL: mul_i32_25:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 25
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 25
   ret i32 %b
@@ -373,15 +368,14 @@ define signext i32 @mul_i32_25(i32 %a) {
 define signext i32 @mul_i32_35(i32 %a) {
 ; LA32-LABEL: mul_i32_35:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 35
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 35
   ret i32 %b
@@ -390,15 +384,14 @@ define signext i32 @mul_i32_35(i32 %a) {
 define signext i32 @mul_i32_37(i32 %a) {
 ; LA32-LABEL: mul_i32_37:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 37
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 37
   ret i32 %b
@@ -407,15 +400,14 @@ define signext i32 @mul_i32_37(i32 %a) {
 define signext i32 @mul_i32_41(i32 %a) {
 ; LA32-LABEL: mul_i32_41:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 41
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 41
   ret i32 %b
@@ -424,15 +416,14 @@ define signext i32 @mul_i32_41(i32 %a) {
 define signext i32 @mul_i32_49(i32 %a) {
 ; LA32-LABEL: mul_i32_49:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 49
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 49
   ret i32 %b
@@ -441,15 +432,14 @@ define signext i32 @mul_i32_49(i32 %a) {
 define signext i32 @mul_i32_69(i32 %a) {
 ; LA32-LABEL: mul_i32_69:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 69
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 69
   ret i32 %b
@@ -458,15 +448,14 @@ define signext i32 @mul_i32_69(i32 %a) {
 define signext i32 @mul_i32_73(i32 %a) {
 ; LA32-LABEL: mul_i32_73:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 73
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 73
   ret i32 %b
@@ -475,15 +464,15 @@ define signext i32 @mul_i32_73(i32 %a) {
 define signext i32 @mul_i32_81(i32 %a) {
 ; LA32-LABEL: mul_i32_81:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 81
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_81:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 81
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
+; LA64-NEXT:    ret
   %b = mul i32 %a, 81
   ret i32 %b
 }
@@ -491,15 +480,14 @@ define signext i32 @mul_i32_81(i32 %a) {
 define signext i32 @mul_i32_137(i32 %a) {
 ; LA32-LABEL: mul_i32_137:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 137
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 137
   ret i32 %b
@@ -508,15 +496,14 @@ define signext i32 @mul_i32_137(i32 %a) {
 define signext i32 @mul_i32_145(i32 %a) {
 ; LA32-LABEL: mul_i32_145:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 145
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 145
   ret i32 %b
@@ -525,15 +512,14 @@ define signext i32 @mul_i32_145(i32 %a) {
 define signext i32 @mul_i32_273(i32 %a) {
 ; LA32-LABEL: mul_i32_273:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    ori $a1, $zero, 273
-; LA32-NEXT:    mul.w $a0, $a0, $a1
+; LA32-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA32-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: mul_i32_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    alsl.w $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.w $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i32 %a, 273
   ret i32 %b
@@ -551,8 +537,8 @@ define i64 @mul_i64_11(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_11:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 11
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 11
   ret i64 %b
@@ -570,8 +556,8 @@ define i64 @mul_i64_13(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_13:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 13
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 13
   ret i64 %b
@@ -589,8 +575,8 @@ define i64 @mul_i64_19(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_19:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 19
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 19
   ret i64 %b
@@ -608,8 +594,8 @@ define i64 @mul_i64_21(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_21:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 21
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 21
   ret i64 %b
@@ -627,8 +613,8 @@ define i64 @mul_i64_25(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_25:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 25
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 25
   ret i64 %b
@@ -646,8 +632,8 @@ define i64 @mul_i64_35(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_35:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 35
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 1
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 35
   ret i64 %b
@@ -665,8 +651,8 @@ define i64 @mul_i64_37(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_37:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 37
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 37
   ret i64 %b
@@ -684,8 +670,8 @@ define i64 @mul_i64_41(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_41:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 41
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 41
   ret i64 %b
@@ -703,8 +689,8 @@ define i64 @mul_i64_49(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_49:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 49
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 1
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 49
   ret i64 %b
@@ -722,8 +708,8 @@ define i64 @mul_i64_69(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_69:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 69
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 2
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 69
   ret i64 %b
@@ -741,8 +727,8 @@ define i64 @mul_i64_73(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_73:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 73
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 73
   ret i64 %b
@@ -760,8 +746,9 @@ define i64 @mul_i64_81(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_81:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 81
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 2
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
+; LA64-NEXT:    ret
   %b = mul i64 %a, 81
   ret i64 %b
 }
@@ -778,8 +765,8 @@ define i64 @mul_i64_137(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_137:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 137
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 3
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 137
   ret i64 %b
@@ -797,8 +784,8 @@ define i64 @mul_i64_145(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_145:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 145
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 3
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 145
   ret i64 %b
@@ -816,8 +803,8 @@ define i64 @mul_i64_273(i64 %a) {
 ;
 ; LA64-LABEL: mul_i64_273:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    ori $a1, $zero, 273
-; LA64-NEXT:    mul.d $a0, $a0, $a1
+; LA64-NEXT:    alsl.d $a1, $a0, $a0, 4
+; LA64-NEXT:    alsl.d $a0, $a1, $a0, 4
 ; LA64-NEXT:    ret
   %b = mul i64 %a, 273
   ret i64 %b
