[llvm] d292157 - [LoongArch] Add codegen support for division operations

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 6 02:55:53 PDT 2022


Author: Weining Lu
Date: 2022-07-06T17:54:57+08:00
New Revision: d29215790f0ff1ef9772c38ffe59e42fa9450db0

URL: https://github.com/llvm/llvm-project/commit/d29215790f0ff1ef9772c38ffe59e42fa9450db0
DIFF: https://github.com/llvm/llvm-project/commit/d29215790f0ff1ef9772c38ffe59e42fa9450db0.diff

LOG: [LoongArch] Add codegen support for division operations

These operations include sdiv/udiv/srem/urem.

As the ISA manual [https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_div_wudu_mod_wudu]
describes, when the divisor is 0 the result can be any value and no
exception is triggered. Unlike GCC, which by default emits code that
checks for division by zero after the division or modulus instruction,
we only emit this check when the `-loongarch-check-zero-division`
option is passed.
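
For example, with the option enabled (an illustrative invocation that
mirrors the RUN lines in the added test; the input file name is made up),
a 64-bit signed division lowers to a div.d followed by the trap sequence:

    llc --mtriple=loongarch64 -loongarch-check-zero-division input.ll

    div.d  $a0, $a0, $a1
    bnez   $a1, 8          # skip the break when the divisor is non-zero
    break  7               # BRK_DIVZERO

Without the option, only the div.d is emitted.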

Differential Revision: https://reviews.llvm.org/D128572

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index d0fb814749d11..25823b777cbe1 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -26,6 +26,11 @@ using namespace llvm;
 
 #define DEBUG_TYPE "loongarch-isel-lowering"
 
+static cl::opt<bool> ZeroDivCheck(
+    "loongarch-check-zero-division", cl::Hidden,
+    cl::desc("Trap on integer division by zero."),
+    cl::init(false));
+
 LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
                                                  const LoongArchSubtarget &STI)
     : TargetLowering(TM), Subtarget(STI) {
@@ -386,6 +391,54 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
   return SDValue();
 }
 
+static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
+                                              MachineBasicBlock &MBB,
+                                              const TargetInstrInfo &TII) {
+  if (!ZeroDivCheck)
+    return &MBB;
+
+  // Build instructions:
+  //   div(or mod)   $dst, $dividend, $divisor
+  //   bnez          $divisor, 8
+  //   break         7
+  //   fallthrough
+  MachineOperand &Divisor = MI.getOperand(2);
+  auto FallThrough = std::next(MI.getIterator());
+
+  BuildMI(MBB, FallThrough, MI.getDebugLoc(), TII.get(LoongArch::BNEZ))
+      .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
+      .addImm(8);
+
+  // See linux header file arch/loongarch/include/uapi/asm/break.h for the
+  // definition of BRK_DIVZERO.
+  BuildMI(MBB, FallThrough, MI.getDebugLoc(), TII.get(LoongArch::BREAK))
+      .addImm(7/*BRK_DIVZERO*/);
+
+  // Clear Divisor's kill flag.
+  Divisor.setIsKill(false);
+
+  return &MBB;
+}
+
+MachineBasicBlock *LoongArchTargetLowering::EmitInstrWithCustomInserter(
+    MachineInstr &MI, MachineBasicBlock *BB) const {
+
+  switch (MI.getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected instr type to insert");
+  case LoongArch::DIV_W:
+  case LoongArch::DIV_WU:
+  case LoongArch::MOD_W:
+  case LoongArch::MOD_WU:
+  case LoongArch::DIV_D:
+  case LoongArch::DIV_DU:
+  case LoongArch::MOD_D:
+  case LoongArch::MOD_DU:
+    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo());
+    break;
+  }
+}
+
 const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
   switch ((LoongArchISD::NodeType)Opcode) {
   case LoongArchISD::FIRST_NUMBER:

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 719e664a4f654..80845d2d627dd 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -92,6 +92,10 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
+
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI,
+                              MachineBasicBlock *BB) const override;
 };
 
 } // end namespace llvm

diff  --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index f4b126476b64a..98c3bffb0c405 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -310,10 +310,12 @@ def XORI : ALU_2RI12<0b0000001111, "xori", uimm12>;
 def MUL_W   : ALU_3R<0b00000000000111000, "mul.w">;
 def MULH_W  : ALU_3R<0b00000000000111001, "mulh.w">;
 def MULH_WU : ALU_3R<0b00000000000111010, "mulh.wu">;
+let usesCustomInserter = true in {
 def DIV_W   : ALU_3R<0b00000000001000000, "div.w">;
 def MOD_W   : ALU_3R<0b00000000001000001, "mod.w">;
 def DIV_WU  : ALU_3R<0b00000000001000010, "div.wu">;
 def MOD_WU  : ALU_3R<0b00000000001000011, "mod.wu">;
+} // usesCustomInserter = true
 
 // Bit-shift Instructions
 def SLL_W  : ALU_3R<0b00000000000101110, "sll.w">;
@@ -415,10 +417,12 @@ def MULH_D    : ALU_3R<0b00000000000111100, "mulh.d">;
 def MULH_DU   : ALU_3R<0b00000000000111101, "mulh.du">;
 def MULW_D_W  : ALU_3R<0b00000000000111110, "mulw.d.w">;
 def MULW_D_WU : ALU_3R<0b00000000000111111, "mulw.d.wu">;
+let usesCustomInserter = true in {
 def DIV_D     : ALU_3R<0b00000000001000100, "div.d">;
 def MOD_D     : ALU_3R<0b00000000001000101, "mod.d">;
 def DIV_DU    : ALU_3R<0b00000000001000110, "div.du">;
 def MOD_DU    : ALU_3R<0b00000000001000111, "mod.du">;
+} // usesCustomInserter = true
 
 // Bit-shift Instructions for 64-bits
 def SLL_D  : ALU_3R<0b00000000000110001, "sll.d">;
@@ -592,6 +596,10 @@ let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
 def : PatGprGpr<sub, SUB_W>;
+def : PatGprGpr<sdiv, DIV_W>;
+def : PatGprGpr<udiv, DIV_WU>;
+def : PatGprGpr<srem, MOD_W>;
+def : PatGprGpr<urem, MOD_WU>;
 } // Predicates = [IsLA32]
 
 let Predicates = [IsLA64] in {
@@ -601,6 +609,10 @@ def : PatGprImm<add, ADDI_D, simm12>;
 def : PatGprImm_32<add, ADDI_W, simm12>;
 def : PatGprGpr<sub, SUB_D>;
 def : PatGprGpr_32<sub, SUB_W>;
+def : PatGprGpr<sdiv, DIV_D>;
+def : PatGprGpr<udiv, DIV_DU>;
+def : PatGprGpr<srem, MOD_D>;
+def : PatGprGpr<urem, MOD_DU>;
 } // Predicates = [IsLA64]
 
 def : PatGprGpr<and, AND>;

diff  --git a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
index 5c33044788615..6c77a79369ad5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp
@@ -63,9 +63,12 @@ bool llvm::lowerLoongArchMachineOperandToMCOperand(const MachineOperand &MO,
   case MachineOperand::MO_MachineBasicBlock:
     MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP);
     break;
+  case MachineOperand::MO_ExternalSymbol:
+    MCOp = lowerSymbolOperand(
+        MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
+    break;
   // TODO: lower special operands
   case MachineOperand::MO_BlockAddress:
-  case MachineOperand::MO_ExternalSymbol:
   case MachineOperand::MO_ConstantPoolIndex:
   case MachineOperand::MO_JumpTableIndex:
     break;

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
new file mode 100644
index 0000000000000..1f1a5c9b920cb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sdiv-udiv-srem-urem.ll
@@ -0,0 +1,685 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+; RUN: llc --mtriple=loongarch32 -loongarch-check-zero-division < %s \
+; RUN:     | FileCheck %s --check-prefix=LA32-TRAP
+; RUN: llc --mtriple=loongarch64 -loongarch-check-zero-division < %s \
+; RUN:     | FileCheck %s --check-prefix=LA64-TRAP
+
+;; Test the sdiv/udiv/srem/urem LLVM IR.
+
+define i1 @sdiv_i1(i1 %a, i1 %b) {
+; LA32-LABEL: sdiv_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sdiv_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: sdiv_i1:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: sdiv_i1:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = sdiv i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @sdiv_i8(i8 %a, i8 %b) {
+; LA32-LABEL: sdiv_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ext.w.b $a1, $a1
+; LA32-NEXT:    ext.w.b $a0, $a0
+; LA32-NEXT:    div.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sdiv_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    ext.w.b $a0, $a0
+; LA64-NEXT:    div.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: sdiv_i8:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    ext.w.b $a1, $a1
+; LA32-TRAP-NEXT:    ext.w.b $a0, $a0
+; LA32-TRAP-NEXT:    div.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: sdiv_i8:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    ext.w.b $a1, $a1
+; LA64-TRAP-NEXT:    ext.w.b $a0, $a0
+; LA64-TRAP-NEXT:    div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = sdiv i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @sdiv_i16(i16 %a, i16 %b) {
+; LA32-LABEL: sdiv_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ext.w.h $a1, $a1
+; LA32-NEXT:    ext.w.h $a0, $a0
+; LA32-NEXT:    div.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sdiv_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    ext.w.h $a0, $a0
+; LA64-NEXT:    div.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: sdiv_i16:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    ext.w.h $a1, $a1
+; LA32-TRAP-NEXT:    ext.w.h $a0, $a0
+; LA32-TRAP-NEXT:    div.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: sdiv_i16:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    ext.w.h $a1, $a1
+; LA64-TRAP-NEXT:    ext.w.h $a0, $a0
+; LA64-TRAP-NEXT:    div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = sdiv i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @sdiv_i32(i32 %a, i32 %b) {
+; LA32-LABEL: sdiv_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    div.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sdiv_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    div.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: sdiv_i32:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    div.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: sdiv_i32:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT:    addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT:    div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = sdiv i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @sdiv_i64(i64 %a, i64 %b) {
+; LA32-LABEL: sdiv_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    bl __divdi3
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sdiv_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    div.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: sdiv_i64:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, -16
+; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
+; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-TRAP-NEXT:    .cfi_offset 1, -4
+; LA32-TRAP-NEXT:    bl __divdi3
+; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: sdiv_i64:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    div.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = sdiv i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @udiv_i1(i1 %a, i1 %b) {
+; LA32-LABEL: udiv_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: udiv_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: udiv_i1:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: udiv_i1:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = udiv i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @udiv_i8(i8 %a, i8 %b) {
+; LA32-LABEL: udiv_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    andi $a0, $a0, 255
+; LA32-NEXT:    div.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: udiv_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    andi $a0, $a0, 255
+; LA64-NEXT:    div.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: udiv_i8:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    andi $a1, $a1, 255
+; LA32-TRAP-NEXT:    andi $a0, $a0, 255
+; LA32-TRAP-NEXT:    div.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: udiv_i8:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    andi $a1, $a1, 255
+; LA64-TRAP-NEXT:    andi $a0, $a0, 255
+; LA64-TRAP-NEXT:    div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = udiv i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @udiv_i16(i16 %a, i16 %b) {
+; LA32-LABEL: udiv_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-NEXT:    div.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: udiv_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT:    div.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: udiv_i16:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-TRAP-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-TRAP-NEXT:    div.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: udiv_i16:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-TRAP-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-TRAP-NEXT:    div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = udiv i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @udiv_i32(i32 %a, i32 %b) {
+; LA32-LABEL: udiv_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    div.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: udiv_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    div.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: udiv_i32:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    div.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: udiv_i32:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-TRAP-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-TRAP-NEXT:    div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = udiv i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @udiv_i64(i64 %a, i64 %b) {
+; LA32-LABEL: udiv_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    bl __udivdi3
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: udiv_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    div.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: udiv_i64:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, -16
+; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
+; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-TRAP-NEXT:    .cfi_offset 1, -4
+; LA32-TRAP-NEXT:    bl __udivdi3
+; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: udiv_i64:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    div.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = udiv i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @srem_i1(i1 %a, i1 %b) {
+; LA32-LABEL: srem_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srem_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: srem_i1:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    move $a0, $zero
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: srem_i1:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    move $a0, $zero
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = srem i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @srem_i8(i8 %a, i8 %b) {
+; LA32-LABEL: srem_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ext.w.b $a1, $a1
+; LA32-NEXT:    ext.w.b $a0, $a0
+; LA32-NEXT:    mod.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srem_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    ext.w.b $a0, $a0
+; LA64-NEXT:    mod.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: srem_i8:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    ext.w.b $a1, $a1
+; LA32-TRAP-NEXT:    ext.w.b $a0, $a0
+; LA32-TRAP-NEXT:    mod.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: srem_i8:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    ext.w.b $a1, $a1
+; LA64-TRAP-NEXT:    ext.w.b $a0, $a0
+; LA64-TRAP-NEXT:    mod.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = srem i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @srem_i16(i16 %a, i16 %b) {
+; LA32-LABEL: srem_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ext.w.h $a1, $a1
+; LA32-NEXT:    ext.w.h $a0, $a0
+; LA32-NEXT:    mod.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srem_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    ext.w.h $a0, $a0
+; LA64-NEXT:    mod.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: srem_i16:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    ext.w.h $a1, $a1
+; LA32-TRAP-NEXT:    ext.w.h $a0, $a0
+; LA32-TRAP-NEXT:    mod.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: srem_i16:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    ext.w.h $a1, $a1
+; LA64-TRAP-NEXT:    ext.w.h $a0, $a0
+; LA64-TRAP-NEXT:    mod.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = srem i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @srem_i32(i32 %a, i32 %b) {
+; LA32-LABEL: srem_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mod.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srem_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    addi.w $a1, $a1, 0
+; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    mod.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: srem_i32:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    mod.w $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: srem_i32:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    addi.w $a1, $a1, 0
+; LA64-TRAP-NEXT:    addi.w $a0, $a0, 0
+; LA64-TRAP-NEXT:    mod.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = srem i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @srem_i64(i64 %a, i64 %b) {
+; LA32-LABEL: srem_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    bl __moddi3
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srem_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mod.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: srem_i64:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, -16
+; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
+; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-TRAP-NEXT:    .cfi_offset 1, -4
+; LA32-TRAP-NEXT:    bl __moddi3
+; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: srem_i64:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    mod.d $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = srem i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @urem_i1(i1 %a, i1 %b) {
+; LA32-LABEL: urem_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: urem_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: urem_i1:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    move $a0, $zero
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: urem_i1:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    move $a0, $zero
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = urem i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @urem_i8(i8 %a, i8 %b) {
+; LA32-LABEL: urem_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a1, $a1, 255
+; LA32-NEXT:    andi $a0, $a0, 255
+; LA32-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: urem_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    andi $a0, $a0, 255
+; LA64-NEXT:    mod.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: urem_i8:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    andi $a1, $a1, 255
+; LA32-TRAP-NEXT:    andi $a0, $a0, 255
+; LA32-TRAP-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: urem_i8:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    andi $a1, $a1, 255
+; LA64-TRAP-NEXT:    andi $a0, $a0, 255
+; LA64-TRAP-NEXT:    mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = urem i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @urem_i16(i16 %a, i16 %b) {
+; LA32-LABEL: urem_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: urem_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT:    mod.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: urem_i16:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    bstrpick.w $a1, $a1, 15, 0
+; LA32-TRAP-NEXT:    bstrpick.w $a0, $a0, 15, 0
+; LA32-TRAP-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: urem_i16:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-TRAP-NEXT:    bstrpick.d $a0, $a0, 15, 0
+; LA64-TRAP-NEXT:    mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = urem i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @urem_i32(i32 %a, i32 %b) {
+; LA32-LABEL: urem_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: urem_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    mod.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: urem_i32:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    mod.wu $a0, $a0, $a1
+; LA32-TRAP-NEXT:    bnez $a1, 8
+; LA32-TRAP-NEXT:    break 7
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: urem_i32:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    bstrpick.d $a1, $a1, 31, 0
+; LA64-TRAP-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-TRAP-NEXT:    mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = urem i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @urem_i64(i64 %a, i64 %b) {
+; LA32-LABEL: urem_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    bl __umoddi3
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: urem_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    mod.du $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+;
+; LA32-TRAP-LABEL: urem_i64:
+; LA32-TRAP:       # %bb.0: # %entry
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, -16
+; LA32-TRAP-NEXT:    .cfi_def_cfa_offset 16
+; LA32-TRAP-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-TRAP-NEXT:    .cfi_offset 1, -4
+; LA32-TRAP-NEXT:    bl __umoddi3
+; LA32-TRAP-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-TRAP-NEXT:    addi.w $sp, $sp, 16
+; LA32-TRAP-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-TRAP-LABEL: urem_i64:
+; LA64-TRAP:       # %bb.0: # %entry
+; LA64-TRAP-NEXT:    mod.du $a0, $a0, $a1
+; LA64-TRAP-NEXT:    bnez $a1, 8
+; LA64-TRAP-NEXT:    break 7
+; LA64-TRAP-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = urem i64 %a, %b
+  ret i64 %r
+}


        

