[llvm] [RISCV] Porting hasAllNBitUsers to RISCV GISel for instruction select (PR #125795)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 4 18:11:11 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Luke Quinn (lquinn2015)
<details>
<summary>Changes</summary>
Ported hasAllNBitUsers to the RISC-V GISel side and added GISelPredicateCode to each of the 16-, 32-, and 64-bit word PatFrags. This allows generation of optimized packw sequences along with other transparent narrowing operations. Also included a few new .ll files to expand test coverage, and limited the OptW pass optimizations to fewer options until GISel is ready for more code generation paths.
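
For reference, the core idea is a bounded recursive walk over the users of a result register: an instruction only needs to produce its low N bits correctly if every (transitive) user reads at most those bits. The sketch below is a minimal standalone model of that walk, using hypothetical `Instr`/`Opcode` stand-ins rather than the real `MachineInstr`/`MachineRegisterInfo` API, just to illustrate the recursion and the shift-amount adjustment used in the patch.

```cpp
// Minimal standalone sketch (not LLVM API) of the "all users only read the
// low N bits" walk performed by hasAllNBitUsers. Instr, Opcode, and Users
// are hypothetical stand-ins for MachineInstr and the use lists in MRI.
#include <cstdint>
#include <vector>

enum class Opcode { AddW, And, Or, Xor, Srli, Other };

struct Instr {
  Opcode Op;
  unsigned ShAmt = 0;               // shift amount, for immediate-shift users
  std::vector<const Instr *> Users; // users of this instruction's result
};

constexpr unsigned MaxDepth = 6;

// Returns true if every (transitive) user reads only the low `Bits` bits.
bool hasAllNBitUsers(const Instr &MI, unsigned Bits, unsigned Depth = 0) {
  if (Depth >= MaxDepth)
    return false;
  for (const Instr *User : MI.Users) {
    switch (User->Op) {
    case Opcode::AddW: // W-form ops only read the low 32 bits of each input
      if (Bits >= 32)
        break;
      return false;
    case Opcode::And:
    case Opcode::Or:
    case Opcode::Xor: // bit-parallel ops: defer to *their* users
      if (hasAllNBitUsers(*User, Bits, Depth + 1))
        break;
      return false;
    case Opcode::Srli: // shifting right by ShAmt discards low bits, so only
                       // Bits - ShAmt bits need to survive in the users
      if (Bits > User->ShAmt &&
          hasAllNBitUsers(*User, Bits - User->ShAmt, Depth + 1))
        break;
      return false;
    default:
      return false; // an unknown user may read all bits
    }
  }
  return true;
}
```

In the actual patch the same walk additionally recognizes the shift-amount operands of SLL/SRA/SRL, SLLI, and ANDI, and is exposed to TableGen patterns through the GISelPredicateCode hooks shown in the diff below.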
---
Patch is 533.66 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/125795.diff
10 Files Affected:
- (modified) llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp (+87)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.td (+8-4)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/combine.ll (+1-1)
- (added) llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll (+811)
- (added) llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll (+3412)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll (+38-38)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll (+4-11)
- (added) llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll (+962)
- (added) llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll (+10982)
``````````diff
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 6a42fdf3c35672a..21921452e911191 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -57,6 +57,20 @@ class RISCVInstructionSelector : public InstructionSelector {
const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
+ static constexpr unsigned MaxRecursionDepth = 6;
+
+ bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
+ const unsigned Depth = 0) const;
+ bool hasAllBUsers(const MachineInstr &MI) const {
+ return hasAllNBitUsers(MI, 8);
+ }
+ bool hasAllHUsers(const MachineInstr &MI) const {
+ return hasAllNBitUsers(MI, 16);
+ }
+ bool hasAllWUsers(const MachineInstr &MI) const {
+ return hasAllNBitUsers(MI, 32);
+ }
+
bool isRegInGprb(Register Reg) const;
bool isRegInFprb(Register Reg) const;
@@ -184,6 +198,79 @@ RISCVInstructionSelector::RISCVInstructionSelector(
{
}
+// Mimics optimizations in ISel and RISCVOptWInst Pass
+bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
+ unsigned Bits,
+ const unsigned Depth) const {
+
+ assert((MI.getOpcode() == TargetOpcode::G_ADD ||
+ MI.getOpcode() == TargetOpcode::G_SUB ||
+ MI.getOpcode() == TargetOpcode::G_MUL ||
+ MI.getOpcode() == TargetOpcode::G_SHL ||
+ MI.getOpcode() == TargetOpcode::G_LSHR ||
+ MI.getOpcode() == TargetOpcode::G_AND ||
+ MI.getOpcode() == TargetOpcode::G_OR ||
+ MI.getOpcode() == TargetOpcode::G_XOR ||
+ MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
+ "Unexpected opcode");
+
+ if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
+ return false;
+
+ auto DestReg = MI.getOperand(0).getReg();
+ for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
+ assert(UserOp.getParent() && "UserOp must have a parent");
+ const MachineInstr &UserMI = *UserOp.getParent();
+ unsigned OpIdx = UserOp.getOperandNo();
+
+ switch (UserMI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::ADDW:
+ case RISCV::ADDIW:
+ case RISCV::SUBW:
+ if (Bits >= 32)
+ break;
+ return false;
+ case RISCV::SLL:
+ case RISCV::SRA:
+ case RISCV::SRL:
+ // Shift amount operands only use log2(Xlen) bits.
+ if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
+ break;
+ return false;
+ case RISCV::SLLI:
+ // SLLI only uses the lower (XLen - ShAmt) bits.
+ if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
+ break;
+ return false;
+ case RISCV::ANDI:
+ if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
+ (uint64_t)UserMI.getOperand(2).getImm()))
+ break;
+ goto RecCheck;
+ case RISCV::AND:
+ case RISCV::OR:
+ case RISCV::XOR:
+ RecCheck:
+ if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
+ break;
+ return false;
+ case RISCV::SRLI: {
+ unsigned ShAmt = UserMI.getOperand(2).getImm();
+ // If we are shifting right by less than Bits, and users don't demand any
+ // bits that were shifted into [Bits-1:0], then we can consider this as an
+ // N-Bit user.
+ if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
+ break;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
unsigned ShiftWidth) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index fec10864f95dc62..54fee1ac3130e1b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1949,15 +1949,19 @@ class binop_allhusers<SDPatternOperator operator>
: PatFrag<(ops node:$lhs, node:$rhs),
(XLenVT (operator node:$lhs, node:$rhs)), [{
return hasAllHUsers(Node);
-}]>;
+}]> {
+ let GISelPredicateCode = [{ return hasAllHUsers(MI); }];
+}
// PatFrag to allow ADDW/SUBW/MULW/SLLW to be selected from i64 add/sub/mul/shl
// if only the lower 32 bits of their result is used.
class binop_allwusers<SDPatternOperator operator>
- : PatFrag<(ops node:$lhs, node:$rhs),
- (i64 (operator node:$lhs, node:$rhs)), [{
+ : PatFrag<(ops node:$lhs, node:$rhs), (i64 (operator node:$lhs, node:$rhs)),
+ [{
return hasAllWUsers(Node);
-}]>;
+}]> {
+ let GISelPredicateCode = [{ return hasAllWUsers(MI); }];
+}
def sexti32_allwusers : PatFrag<(ops node:$src),
(sext_inreg node:$src, i32), [{
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
index 360e84d37ec8584..61d1fa5a5b9f4b7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
@@ -20,7 +20,7 @@ define i32 @constant_to_rhs(i32 %x) {
; RV64-O0: # %bb.0:
; RV64-O0-NEXT: mv a1, a0
; RV64-O0-NEXT: li a0, 1
-; RV64-O0-NEXT: add a0, a0, a1
+; RV64-O0-NEXT: addw a0, a0, a1
; RV64-O0-NEXT: sext.w a0, a0
; RV64-O0-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
new file mode 100644
index 000000000000000..f62902cdd14d9e6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
@@ -0,0 +1,811 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs -global-isel < %s \
+; RUN: | FileCheck -check-prefixes=RV32,RV32IM %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zba,+zbb \
+; RUN: -verify-machineinstrs -global-isel < %s \
+; RUN: | FileCheck -check-prefixes=RV32,RV32IMZB %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs -global-isel < %s \
+; RUN: | FileCheck -check-prefixes=RV64,RV64IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb \
+; RUN: -verify-machineinstrs -global-isel < %s \
+; RUN: | FileCheck -check-prefixes=RV64,RV64IMZB %s
+
+; Test that there is a single shift after the mul and no addition.
+define i32 @udiv_constant_no_add(i32 %a) nounwind {
+; RV32-LABEL: udiv_constant_no_add:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, 838861
+; RV32-NEXT: addi a1, a1, -819
+; RV32-NEXT: mulhu a0, a0, a1
+; RV32-NEXT: srli a0, a0, 2
+; RV32-NEXT: ret
+;
+; RV64IM-LABEL: udiv_constant_no_add:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: slli a0, a0, 32
+; RV64IM-NEXT: lui a1, 205
+; RV64IM-NEXT: srli a0, a0, 32
+; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 34
+; RV64IM-NEXT: ret
+;
+; RV64IMZB-LABEL: udiv_constant_no_add:
+; RV64IMZB: # %bb.0:
+; RV64IMZB-NEXT: zext.w a0, a0
+; RV64IMZB-NEXT: lui a1, 838861
+; RV64IMZB-NEXT: addi a1, a1, -819
+; RV64IMZB-NEXT: zext.w a1, a1
+; RV64IMZB-NEXT: mul a0, a0, a1
+; RV64IMZB-NEXT: srli a0, a0, 34
+; RV64IMZB-NEXT: ret
+ %1 = udiv i32 %a, 5
+ ret i32 %1
+}
+
+; This constant requires a sub, shrli, add sequence after the mul.
+define i32 @udiv_constant_add(i32 %a) nounwind {
+; RV32-LABEL: udiv_constant_add:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, 149797
+; RV32-NEXT: addi a1, a1, -1755
+; RV32-NEXT: mulhu a1, a0, a1
+; RV32-NEXT: sub a0, a0, a1
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: srli a0, a0, 2
+; RV32-NEXT: ret
+;
+; RV64IM-LABEL: udiv_constant_add:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 149797
+; RV64IM-NEXT: slli a2, a0, 32
+; RV64IM-NEXT: addiw a1, a1, -1755
+; RV64IM-NEXT: srli a2, a2, 32
+; RV64IM-NEXT: mul a1, a2, a1
+; RV64IM-NEXT: srli a1, a1, 32
+; RV64IM-NEXT: subw a0, a0, a1
+; RV64IM-NEXT: srliw a0, a0, 1
+; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: srliw a0, a0, 2
+; RV64IM-NEXT: ret
+;
+; RV64IMZB-LABEL: udiv_constant_add:
+; RV64IMZB: # %bb.0:
+; RV64IMZB-NEXT: lui a1, 149797
+; RV64IMZB-NEXT: addiw a1, a1, -1755
+; RV64IMZB-NEXT: zext.w a2, a0
+; RV64IMZB-NEXT: mul a1, a2, a1
+; RV64IMZB-NEXT: srli a1, a1, 32
+; RV64IMZB-NEXT: subw a0, a0, a1
+; RV64IMZB-NEXT: srliw a0, a0, 1
+; RV64IMZB-NEXT: add a0, a0, a1
+; RV64IMZB-NEXT: srliw a0, a0, 2
+; RV64IMZB-NEXT: ret
+ %1 = udiv i32 %a, 7
+ ret i32 %1
+}
+
+define i64 @udiv64_constant_no_add(i64 %a) nounwind {
+; RV32-LABEL: udiv64_constant_no_add:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a2, 838861
+; RV32-NEXT: mulhu a3, a0, zero
+; RV32-NEXT: addi a4, a2, -819
+; RV32-NEXT: addi a2, a2, -820
+; RV32-NEXT: mul a5, a1, a4
+; RV32-NEXT: mul a6, a0, a2
+; RV32-NEXT: mulhu a7, a0, a4
+; RV32-NEXT: mul t0, zero, a4
+; RV32-NEXT: mul t1, a1, a2
+; RV32-NEXT: mulhu t2, a1, a4
+; RV32-NEXT: mulhu a0, a0, a2
+; RV32-NEXT: mulhu a1, a1, a2
+; RV32-NEXT: mul a2, zero, a2
+; RV32-NEXT: mulhu a4, zero, a4
+; RV32-NEXT: add a5, a5, a6
+; RV32-NEXT: add a2, t0, a2
+; RV32-NEXT: add t0, t0, t1
+; RV32-NEXT: add a1, a4, a1
+; RV32-NEXT: sltu a4, a5, a6
+; RV32-NEXT: add a5, a5, a7
+; RV32-NEXT: sltu a6, t0, t1
+; RV32-NEXT: sltiu t1, t0, 0
+; RV32-NEXT: add t0, t0, t2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sltu a2, a5, a7
+; RV32-NEXT: add a6, a6, t1
+; RV32-NEXT: sltu a5, t0, t2
+; RV32-NEXT: add t0, t0, a0
+; RV32-NEXT: add a1, a1, a3
+; RV32-NEXT: add a2, a4, a2
+; RV32-NEXT: add a5, a6, a5
+; RV32-NEXT: sltu a0, t0, a0
+; RV32-NEXT: add a0, a5, a0
+; RV32-NEXT: add t0, t0, a2
+; RV32-NEXT: sltu a2, t0, a2
+; RV32-NEXT: srli a3, t0, 2
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: add a1, a1, a0
+; RV32-NEXT: slli a0, a1, 30
+; RV32-NEXT: or a0, a3, a0
+; RV32-NEXT: srli a1, a1, 2
+; RV32-NEXT: ret
+;
+; RV64-LABEL: udiv64_constant_no_add:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a1, 1035469
+; RV64-NEXT: addi a1, a1, -819
+; RV64-NEXT: slli a1, a1, 12
+; RV64-NEXT: addi a1, a1, -819
+; RV64-NEXT: slli a1, a1, 12
+; RV64-NEXT: addi a1, a1, -819
+; RV64-NEXT: slli a1, a1, 12
+; RV64-NEXT: addi a1, a1, -819
+; RV64-NEXT: mulhu a0, a0, a1
+; RV64-NEXT: srli a0, a0, 2
+; RV64-NEXT: ret
+ %1 = udiv i64 %a, 5
+ ret i64 %1
+}
+
+define i64 @udiv64_constant_add(i64 %a) nounwind {
+; RV32-LABEL: udiv64_constant_add:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a2, 599186
+; RV32-NEXT: lui a3, 149797
+; RV32-NEXT: mulhu a4, a0, zero
+; RV32-NEXT: addi a2, a2, 1171
+; RV32-NEXT: addi a3, a3, -1756
+; RV32-NEXT: mul a5, a1, a2
+; RV32-NEXT: mul a6, a0, a3
+; RV32-NEXT: mulhu a7, a0, a2
+; RV32-NEXT: mul t0, zero, a2
+; RV32-NEXT: mulhu t1, zero, a2
+; RV32-NEXT: mulhu t2, a1, a3
+; RV32-NEXT: add t1, t1, t2
+; RV32-NEXT: mul t2, zero, a3
+; RV32-NEXT: add t2, t0, t2
+; RV32-NEXT: add t1, t2, t1
+; RV32-NEXT: mul t2, a1, a3
+; RV32-NEXT: mulhu a2, a1, a2
+; RV32-NEXT: mulhu a3, a0, a3
+; RV32-NEXT: add a5, a5, a6
+; RV32-NEXT: add t0, t0, t2
+; RV32-NEXT: sltu a6, a5, a6
+; RV32-NEXT: add a5, a5, a7
+; RV32-NEXT: sltu t2, t0, t2
+; RV32-NEXT: sltu a5, a5, a7
+; RV32-NEXT: sltiu a7, t0, 0
+; RV32-NEXT: add t0, t0, a2
+; RV32-NEXT: add a7, t2, a7
+; RV32-NEXT: sltu a2, t0, a2
+; RV32-NEXT: add t0, t0, a3
+; RV32-NEXT: add a4, t1, a4
+; RV32-NEXT: add a5, a6, a5
+; RV32-NEXT: add a2, a7, a2
+; RV32-NEXT: sltu a3, t0, a3
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: add t0, t0, a5
+; RV32-NEXT: sltu a3, t0, a5
+; RV32-NEXT: sub a5, a0, t0
+; RV32-NEXT: sltu a0, a0, t0
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: sub a1, a1, a0
+; RV32-NEXT: srli a5, a5, 1
+; RV32-NEXT: add a2, a4, a2
+; RV32-NEXT: sub a1, a1, a2
+; RV32-NEXT: slli a0, a1, 31
+; RV32-NEXT: srli a1, a1, 1
+; RV32-NEXT: or a0, a5, a0
+; RV32-NEXT: add a1, a1, a2
+; RV32-NEXT: add a0, a0, t0
+; RV32-NEXT: sltu a2, a0, t0
+; RV32-NEXT: srli a0, a0, 2
+; RV32-NEXT: add a1, a1, a2
+; RV32-NEXT: slli a2, a1, 30
+; RV32-NEXT: or a0, a0, a2
+; RV32-NEXT: srli a1, a1, 2
+; RV32-NEXT: ret
+;
+; RV64-LABEL: udiv64_constant_add:
+; RV64: # %bb.0:
+; RV64-NEXT: lui a1, %hi(.LCPI3_0)
+; RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1)
+; RV64-NEXT: mulhu a1, a0, a1
+; RV64-NEXT: sub a0, a0, a1
+; RV64-NEXT: srli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: srli a0, a0, 2
+; RV64-NEXT: ret
+ %1 = udiv i64 %a, 7
+ ret i64 %1
+}
+
+define i8 @udiv8_constant_no_add(i8 %a) nounwind {
+; RV32-LABEL: udiv8_constant_no_add:
+; RV32: # %bb.0:
+; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: li a1, 205
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: srli a0, a0, 10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: udiv8_constant_no_add:
+; RV64: # %bb.0:
+; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: li a1, 205
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: srli a0, a0, 10
+; RV64-NEXT: ret
+ %1 = udiv i8 %a, 5
+ ret i8 %1
+}
+
+define i8 @udiv8_constant_add(i8 %a) nounwind {
+; RV32-LABEL: udiv8_constant_add:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 37
+; RV32-NEXT: andi a2, a0, 255
+; RV32-NEXT: mul a1, a2, a1
+; RV32-NEXT: srli a1, a1, 8
+; RV32-NEXT: sub a0, a0, a1
+; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: andi a0, a0, 255
+; RV32-NEXT: srli a0, a0, 2
+; RV32-NEXT: ret
+;
+; RV64-LABEL: udiv8_constant_add:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 37
+; RV64-NEXT: andi a2, a0, 255
+; RV64-NEXT: mul a1, a2, a1
+; RV64-NEXT: srli a1, a1, 8
+; RV64-NEXT: subw a0, a0, a1
+; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: srli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: andi a0, a0, 255
+; RV64-NEXT: srli a0, a0, 2
+; RV64-NEXT: ret
+ %1 = udiv i8 %a, 7
+ ret i8 %1
+}
+
+define i16 @udiv16_constant_no_add(i16 %a) nounwind {
+; RV32IM-LABEL: udiv16_constant_no_add:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: slli a0, a0, 16
+; RV32IM-NEXT: lui a1, 13
+; RV32IM-NEXT: srli a0, a0, 16
+; RV32IM-NEXT: addi a1, a1, -819
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: srli a0, a0, 18
+; RV32IM-NEXT: ret
+;
+; RV32IMZB-LABEL: udiv16_constant_no_add:
+; RV32IMZB: # %bb.0:
+; RV32IMZB-NEXT: zext.h a0, a0
+; RV32IMZB-NEXT: lui a1, 13
+; RV32IMZB-NEXT: addi a1, a1, -819
+; RV32IMZB-NEXT: mul a0, a0, a1
+; RV32IMZB-NEXT: srli a0, a0, 18
+; RV32IMZB-NEXT: ret
+;
+; RV64IM-LABEL: udiv16_constant_no_add:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: slli a0, a0, 48
+; RV64IM-NEXT: lui a1, 13
+; RV64IM-NEXT: srli a0, a0, 48
+; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 18
+; RV64IM-NEXT: ret
+;
+; RV64IMZB-LABEL: udiv16_constant_no_add:
+; RV64IMZB: # %bb.0:
+; RV64IMZB-NEXT: zext.h a0, a0
+; RV64IMZB-NEXT: lui a1, 13
+; RV64IMZB-NEXT: addiw a1, a1, -819
+; RV64IMZB-NEXT: mul a0, a0, a1
+; RV64IMZB-NEXT: srli a0, a0, 18
+; RV64IMZB-NEXT: ret
+ %1 = udiv i16 %a, 5
+ ret i16 %1
+}
+
+define i16 @udiv16_constant_add(i16 %a) nounwind {
+; RV32IM-LABEL: udiv16_constant_add:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 2
+; RV32IM-NEXT: lui a2, 16
+; RV32IM-NEXT: addi a1, a1, 1171
+; RV32IM-NEXT: addi a2, a2, -1
+; RV32IM-NEXT: and a3, a0, a2
+; RV32IM-NEXT: mul a1, a3, a1
+; RV32IM-NEXT: srli a1, a1, 16
+; RV32IM-NEXT: sub a0, a0, a1
+; RV32IM-NEXT: and a0, a0, a2
+; RV32IM-NEXT: srli a0, a0, 1
+; RV32IM-NEXT: add a0, a0, a1
+; RV32IM-NEXT: and a0, a0, a2
+; RV32IM-NEXT: srli a0, a0, 2
+; RV32IM-NEXT: ret
+;
+; RV32IMZB-LABEL: udiv16_constant_add:
+; RV32IMZB: # %bb.0:
+; RV32IMZB-NEXT: lui a1, 2
+; RV32IMZB-NEXT: addi a1, a1, 1171
+; RV32IMZB-NEXT: zext.h a2, a0
+; RV32IMZB-NEXT: mul a1, a2, a1
+; RV32IMZB-NEXT: srli a1, a1, 16
+; RV32IMZB-NEXT: sub a0, a0, a1
+; RV32IMZB-NEXT: zext.h a0, a0
+; RV32IMZB-NEXT: srli a0, a0, 1
+; RV32IMZB-NEXT: add a0, a0, a1
+; RV32IMZB-NEXT: zext.h a0, a0
+; RV32IMZB-NEXT: srli a0, a0, 2
+; RV32IMZB-NEXT: ret
+;
+; RV64IM-LABEL: udiv16_constant_add:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 2
+; RV64IM-NEXT: lui a2, 16
+; RV64IM-NEXT: addiw a1, a1, 1171
+; RV64IM-NEXT: addiw a2, a2, -1
+; RV64IM-NEXT: and a3, a0, a2
+; RV64IM-NEXT: mul a1, a3, a1
+; RV64IM-NEXT: srli a1, a1, 16
+; RV64IM-NEXT: sub a0, a0, a1
+; RV64IM-NEXT: and a0, a0, a2
+; RV64IM-NEXT: srli a0, a0, 1
+; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: and a0, a0, a2
+; RV64IM-NEXT: srli a0, a0, 2
+; RV64IM-NEXT: ret
+;
+; RV64IMZB-LABEL: udiv16_constant_add:
+; RV64IMZB: # %bb.0:
+; RV64IMZB-NEXT: lui a1, 2
+; RV64IMZB-NEXT: addi a1, a1, 1171
+; RV64IMZB-NEXT: zext.h a2, a0
+; RV64IMZB-NEXT: mul a1, a2, a1
+; RV64IMZB-NEXT: srli a1, a1, 16
+; RV64IMZB-NEXT: sub a0, a0, a1
+; RV64IMZB-NEXT: zext.h a0, a0
+; RV64IMZB-NEXT: srli a0, a0, 1
+; RV64IMZB-NEXT: add a0, a0, a1
+; RV64IMZB-NEXT: zext.h a0, a0
+; RV64IMZB-NEXT: srli a0, a0, 2
+; RV64IMZB-NEXT: ret
+ %1 = udiv i16 %a, 7
+ ret i16 %1
+}
+
+; Test the simplest case a srli and an add after the mul. No srai.
+define i32 @sdiv_constant_no_srai(i32 %a) nounwind {
+; RV32-LABEL: sdiv_constant_no_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 3
+; RV32-NEXT: div a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sdiv_constant_no_srai:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 3
+; RV64-NEXT: divw a0, a0, a1
+; RV64-NEXT: ret
+ %1 = sdiv i32 %a, 3
+ ret i32 %1
+}
+
+; This constant requires an srai between the mul and the add.
+define i32 @sdiv_constant_srai(i32 %a) nounwind {
+; RV32-LABEL: sdiv_constant_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 5
+; RV32-NEXT: div a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sdiv_constant_srai:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 5
+; RV64-NEXT: divw a0, a0, a1
+; RV64-NEXT: ret
+ %1 = sdiv i32 %a, 5
+ ret i32 %1
+}
+
+; This constant requires an add and an srai after the mul.
+define i32 @sdiv_constant_add_srai(i32 %a) nounwind {
+; RV32-LABEL: sdiv_constant_add_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 7
+; RV32-NEXT: div a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sdiv_constant_add_srai:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 7
+; RV64-NEXT: divw a0, a0, a1
+; RV64-NEXT: ret
+ %1 = sdiv i32 %a, 7
+ ret i32 %1
+}
+
+; This constant requires a sub and an srai after the mul.
+define i32 @sdiv_constant_sub_srai(i32 %a) nounwind {
+; RV32-LABEL: sdiv_constant_sub_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, -7
+; RV32-NEXT: div a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sdiv_constant_sub_srai:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, -7
+; RV64-NEXT: divw a0, a0, a1
+; RV64-NEXT: ret
+ %1 = sdiv i32 %a, -7
+ ret i32 %1
+}
+
+define i64 @sdiv64_constant_no_srai(i64 %a) nounwind {
+; RV32-LABEL: sdiv64_constant_no_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a2, 3
+; RV32-NEXT: li a3, 0
+; RV32-NEXT: call __divdi3
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sdiv64_constant_no_srai:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 3
+; RV64-NEXT: div a0, a0, a1
+; RV64-NEXT: ret
+ %1 = sdiv i64 %a, 3
+ ret i64 %1
+}
+
+define i64 @sdiv64_constant_srai(i64 %a) nounwind {
+; RV32-LABEL: sdiv64_constant_srai:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: li a2, 5
+; RV32-NEXT: li a3, 0
+; RV32-NEXT: call __divdi3
+; RV32-NEXT:...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/125795
More information about the llvm-commits mailing list