[llvm] cb82de2 - [RISCV] Optimize multiplication by constant
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 7 18:50:39 PDT 2020
Author: Ben Shi
Date: 2020-07-07T18:50:24-07:00
New Revision: cb82de29601745d6c4beaf51ee1dbd1bf7acc186
URL: https://github.com/llvm/llvm-project/commit/cb82de29601745d6c4beaf51ee1dbd1bf7acc186
DIFF: https://github.com/llvm/llvm-project/commit/cb82de29601745d6c4beaf51ee1dbd1bf7acc186.diff
LOG: [RISCV] Optimize multiplication by constant
... to shift/add or shift/sub.
Do not enable it on riscv32 with the M extension, where decomposeMulByConstant
may not be an optimization.
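As an illustration (an editorial sketch, not part of the committed change), the constants the new hook accepts are those of the form +/-2^k +/- 1, so the multiply can be lowered with a shift and an add/sub (plus a negate for the negative cases). A standalone version of that check, with hypothetical names and unsigned arithmetic to avoid signed overflow, might look like:

    #include <cstdint>

    // x * Imm is rewritten when Imm is 2^k + 1, 2^k - 1, -(2^k - 1) or -(2^k + 1):
    //   x * 5   -> (x << 2) + x        x * 63  -> (x << 6) - x
    //   x * -63 -> x - (x << 6)        x * -65 -> -((x << 6) + x)
    static bool isPow2(uint64_t V) { return V != 0 && (V & (V - 1)) == 0; }

    bool isShiftAddSubCandidate(int64_t Imm) {
      uint64_t U = static_cast<uint64_t>(Imm);
      return isPow2(U + 1) || isPow2(U - 1) ||
             isPow2(1 - U) || isPow2(~U);   // ~U == -1 - U (mod 2^64)
    }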
Reviewed By: luismarques, MaskRay
Differential Revision: https://reviews.llvm.org/D82660
Added:

Modified:
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/mul.ll

Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b2e51516b983..91fc69b5bc10 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -33,6 +33,7 @@
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -2978,6 +2979,26 @@ bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
return true;
}
+bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
+                                                 SDValue C) const {
+  // Check integral scalar types.
+  if (VT.isScalarInteger()) {
+    // Do not perform the transformation on riscv32 with the M extension.
+    if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
+      return false;
+    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
+      if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
+        return false;
+      int64_t Imm = ConstNode->getSExtValue();
+      if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
+          isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
+        return true;
+    }
+  }
+
+  return false;
+}
+
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 691bb6d75d13..e420e879efc9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -185,6 +185,9 @@ class RISCVTargetLowering : public TargetLowering {
bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
bool shouldConsiderGEPOffsetSplit() const override { return true; }
+  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
+                              SDValue C) const override;
+
TargetLowering::AtomicExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder, AtomicRMWInst *AI,
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 5808660b5713..89c4bce122fd 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -79,12 +79,8 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
define signext i32 @mul_constant(i32 %a) nounwind {
; RV32I-LABEL: mul_constant:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a1, zero, 5
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a1, a0, 2
+; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul_constant:
@@ -95,19 +91,14 @@ define signext i32 @mul_constant(i32 %a) nounwind {
;
; RV64I-LABEL: mul_constant:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 5
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul_constant:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 5
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: addw a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i32 %a, 5
ret i32 %1
@@ -177,13 +168,15 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
define i64 @mul64_constant(i64 %a) nounwind {
; RV32I-LABEL: mul64_constant:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a2, zero, 5
-; RV32I-NEXT: mv a3, zero
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a3, a0, 2
+; RV32I-NEXT: add a2, a3, a0
+; RV32I-NEXT: sltu a3, a2, a3
+; RV32I-NEXT: srli a0, a0, 30
+; RV32I-NEXT: slli a4, a1, 2
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: add a1, a0, a3
+; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul64_constant:
@@ -197,18 +190,14 @@ define i64 @mul64_constant(i64 %a) nounwind {
;
; RV64I-LABEL: mul64_constant:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 5
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul64_constant:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 5
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i64 %a, 5
ret i64 %1
@@ -305,12 +294,8 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
define i32 @muli32_p65(i32 %a) nounwind {
; RV32I-LABEL: muli32_p65:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a1, zero, 65
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a1, a0, 6
+; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p65:
@@ -321,18 +306,14 @@ define i32 @muli32_p65(i32 %a) nounwind {
;
; RV64I-LABEL: muli32_p65:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 65
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p65:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 65
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: addw a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i32 %a, 65
ret i32 %1
@@ -341,12 +322,8 @@ define i32 @muli32_p65(i32 %a) nounwind {
define i32 @muli32_p63(i32 %a) nounwind {
; RV32I-LABEL: muli32_p63:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a1, zero, 63
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a1, a0, 6
+; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p63:
@@ -357,18 +334,14 @@ define i32 @muli32_p63(i32 %a) nounwind {
;
; RV64I-LABEL: muli32_p63:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 63
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p63:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 63
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: subw a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i32 %a, 63
ret i32 %1
@@ -377,13 +350,15 @@ define i32 @muli32_p63(i32 %a) nounwind {
define i64 @muli64_p65(i64 %a) nounwind {
; RV32I-LABEL: muli64_p65:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a2, zero, 65
-; RV32I-NEXT: mv a3, zero
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a3, a0, 6
+; RV32I-NEXT: add a2, a3, a0
+; RV32I-NEXT: sltu a3, a2, a3
+; RV32I-NEXT: srli a0, a0, 26
+; RV32I-NEXT: slli a4, a1, 6
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: add a1, a0, a3
+; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p65:
@@ -397,18 +372,14 @@ define i64 @muli64_p65(i64 %a) nounwind {
;
; RV64I-LABEL: muli64_p65:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 65
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p65:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 65
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i64 %a, 65
ret i64 %1
@@ -417,13 +388,14 @@ define i64 @muli64_p65(i64 %a) nounwind {
define i64 @muli64_p63(i64 %a) nounwind {
; RV32I-LABEL: muli64_p63:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a2, zero, 63
-; RV32I-NEXT: mv a3, zero
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a2, a0, 6
+; RV32I-NEXT: sltu a3, a2, a0
+; RV32I-NEXT: srli a4, a0, 26
+; RV32I-NEXT: slli a5, a1, 6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p63:
@@ -437,18 +409,14 @@ define i64 @muli64_p63(i64 %a) nounwind {
;
; RV64I-LABEL: muli64_p63:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, 63
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p63:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, 63
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: sub a0, a1, a0
; RV64IM-NEXT: ret
%1 = mul i64 %a, 63
ret i64 %1
@@ -457,12 +425,8 @@ define i64 @muli64_p63(i64 %a) nounwind {
define i32 @muli32_m63(i32 %a) nounwind {
; RV32I-LABEL: muli32_m63:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a1, zero, -63
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a1, a0, 6
+; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m63:
@@ -473,18 +437,14 @@ define i32 @muli32_m63(i32 %a) nounwind {
;
; RV64I-LABEL: muli32_m63:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, -63
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m63:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, -63
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, -63
ret i32 %1
@@ -493,12 +453,9 @@ define i32 @muli32_m63(i32 %a) nounwind {
define i32 @muli32_m65(i32 %a) nounwind {
; RV32I-LABEL: muli32_m65:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a1, zero, -65
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a1, a0, 6
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m65:
@@ -509,18 +466,16 @@ define i32 @muli32_m65(i32 %a) nounwind {
;
; RV64I-LABEL: muli32_m65:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, -65
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m65:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, -65
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: add a0, a1, a0
+; RV64IM-NEXT: negw a0, a0
; RV64IM-NEXT: ret
%1 = mul i32 %a, -65
ret i32 %1
@@ -529,13 +484,14 @@ define i32 @muli32_m65(i32 %a) nounwind {
define i64 @muli64_m63(i64 %a) nounwind {
; RV32I-LABEL: muli64_m63:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a2, zero, -63
-; RV32I-NEXT: addi a3, zero, -1
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a2, a0, 6
+; RV32I-NEXT: sltu a3, a0, a2
+; RV32I-NEXT: srli a4, a0, 26
+; RV32I-NEXT: slli a5, a1, 6
+; RV32I-NEXT: or a4, a5, a4
+; RV32I-NEXT: sub a1, a1, a4
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m63:
@@ -550,18 +506,14 @@ define i64 @muli64_m63(i64 %a) nounwind {
;
; RV64I-LABEL: muli64_m63:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, -63
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m63:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, -63
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i64 %a, -63
ret i64 %1
@@ -570,13 +522,18 @@ define i64 @muli64_m63(i64 %a) nounwind {
define i64 @muli64_m65(i64 %a) nounwind {
; RV32I-LABEL: muli64_m65:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp)
-; RV32I-NEXT: addi a2, zero, -65
-; RV32I-NEXT: addi a3, zero, -1
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: lw ra, 12(sp)
-; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: slli a2, a0, 6
+; RV32I-NEXT: add a3, a2, a0
+; RV32I-NEXT: sltu a2, a3, a2
+; RV32I-NEXT: srli a0, a0, 26
+; RV32I-NEXT: slli a4, a1, 6
+; RV32I-NEXT: or a0, a4, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: snez a1, a3
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: neg a1, a0
+; RV32I-NEXT: neg a0, a3
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m65:
@@ -591,18 +548,16 @@ define i64 @muli64_m65(i64 %a) nounwind {
;
; RV64I-LABEL: muli64_m65:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp)
-; RV64I-NEXT: addi a1, zero, -65
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp)
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m65:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: addi a1, zero, -65
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 6
+; RV64IM-NEXT: add a0, a1, a0
+; RV64IM-NEXT: neg a0, a0
; RV64IM-NEXT: ret
%1 = mul i64 %a, -65
ret i64 %1
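As an illustrative aside (not part of the commit), the RV32I output for mul64_constant above performs the i64-by-5 multiply across two 32-bit halves. A minimal C++ model of that sequence, with hypothetical names, is:

    #include <cstdint>

    // Models (hi:lo) * 5 as ((hi:lo) << 2) + (hi:lo) using only 32-bit ops,
    // mirroring the slli/add/sltu/srli/or sequence in the RV32I check lines.
    uint64_t mul5_rv32_style(uint32_t lo, uint32_t hi) {
      uint32_t lo_shl = lo << 2;                   // slli a3, a0, 2
      uint32_t new_lo = lo_shl + lo;               // add  a2, a3, a0
      uint32_t carry = new_lo < lo_shl;            // sltu a3, a2, a3
      uint32_t hi_shl = (hi << 2) | (lo >> 30);    // slli/srli/or: 64-bit shift by 2
      uint32_t new_hi = hi_shl + hi + carry;       // add a0, a0, a1 / add a1, a0, a3
      return ((uint64_t)new_hi << 32) | new_lo;
    }

The muli64_p63/m63 and m65 variants follow the same pattern, using a subtract in place of the add and, for the negative constants, a final two-word negation of the result.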