[llvm] 538b90c - [RISCV][GlobalISel] Select ALU GPR instructions

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 28 10:11:32 PDT 2023


Author: Craig Topper
Date: 2023-08-28T10:11:09-07:00
New Revision: 538b90cf4c63c7f7b55a9eb9812e951529abd66c

URL: https://github.com/llvm/llvm-project/commit/538b90cf4c63c7f7b55a9eb9812e951529abd66c
DIFF: https://github.com/llvm/llvm-project/commit/538b90cf4c63c7f7b55a9eb9812e951529abd66c.diff

LOG: [RISCV][GlobalISel] Select ALU GPR instructions

Some instruction selection patterns required for ALU GPR instructions have already been automatically imported from existing TableGen descriptions - this patch simply adds testing for them.

Logic for selecting constants and copies has been added, along with the first of the GIComplexPatternEquiv definitions required to select the shiftMaskXLen ComplexPattern.

New patterns have been added to directly select RV64 W instructions from gMIR rather than using custom gMIR operations earlier in the pipeline. In future this could also support the checks present in the DAGToDAGISel for finding ops whose users only require the lower 32 bits.
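
As a condensed sketch of the mechanism (abridged from the RISCVGISel.td hunk
below, not a complete listing), the GIComplexPatternEquiv definition ties the
existing SelectionDAG shiftMaskXLen ComplexPattern to the new
selectS32ShiftMask renderer in RISCVInstructionSelector.cpp, and the RV64-only
patterns pick W instructions straight from i32 gMIR operations:

    // Reuse the DAG ComplexPattern through a GlobalISel complex operand
    // matcher backed by RISCVInstructionSelector::selectS32ShiftMask.
    def s32ShiftMaskGI :
        GIComplexOperandMatcher<s32, "selectS32ShiftMask">,
        GIComplexPatternEquiv<shiftMaskXLen>;

    // Select W instructions directly from i32 operations on RV64.
    let Predicates = [IsRV64] in {
    def : Pat<(i32 (add GPR:$rs1, GPR:$rs2)), (ADDW GPR:$rs1, GPR:$rs2)>;
    def : Pat<(i32 (shl GPR:$rs1, (i32 GPR:$rs2))), (SLLW GPR:$rs1, GPR:$rs2)>;
    }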

Differential Revision: https://reviews.llvm.org/D76445

Added: 
    llvm/lib/Target/RISCV/RISCVGISel.td
    llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 60d81da57da0e1..4111c8267566ab 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -7,7 +7,6 @@ tablegen(LLVM RISCVGenAsmWriter.inc -gen-asm-writer)
 tablegen(LLVM RISCVGenCompressInstEmitter.inc -gen-compress-inst-emitter)
 tablegen(LLVM RISCVGenDAGISel.inc -gen-dag-isel)
 tablegen(LLVM RISCVGenDisassemblerTables.inc -gen-disassembler)
-tablegen(LLVM RISCVGenGlobalISel.inc -gen-global-isel)
 tablegen(LLVM RISCVGenInstrInfo.inc -gen-instr-info)
 tablegen(LLVM RISCVGenMCCodeEmitter.inc -gen-emitter)
 tablegen(LLVM RISCVGenMCPseudoLowering.inc -gen-pseudo-lowering)
@@ -16,6 +15,9 @@ tablegen(LLVM RISCVGenRegisterInfo.inc -gen-register-info)
 tablegen(LLVM RISCVGenSearchableTables.inc -gen-searchable-tables)
 tablegen(LLVM RISCVGenSubtargetInfo.inc -gen-subtarget)
 
+set(LLVM_TARGET_DEFINITIONS RISCVGISel.td)
+tablegen(LLVM RISCVGenGlobalISel.inc -gen-global-isel)
+
 add_public_tablegen_target(RISCVCommonTableGen)
 
 add_llvm_target(RISCVCodeGen

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index f56b2184407f24..6d610d094e9f44 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -50,6 +50,8 @@ class RISCVInstructionSelector : public InstructionSelector {
   bool selectConstant(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
 
+  ComplexRendererFns selectS32ShiftMask(MachineOperand &Root) const;
+
   const RISCVSubtarget &STI;
   const RISCVInstrInfo &TII;
   const RISCVRegisterInfo &TRI;
@@ -89,6 +91,16 @@ RISCVInstructionSelector::RISCVInstructionSelector(
 {
 }
 
+InstructionSelector::ComplexRendererFns
+RISCVInstructionSelector::selectS32ShiftMask(MachineOperand &Root) const {
+  // TODO: Also check if we are seeing the result of an AND operation which
+  // could be bypassed since we only check the lower log2(xlen) bits.
+  return {{
+      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
+      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
+  }};
+}
+
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   unsigned Opc = MI.getOpcode();
   MachineBasicBlock &MBB = *MI.getParent();
@@ -111,6 +123,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
 
   switch (Opc) {
   case TargetOpcode::G_ANYEXT:
+  case TargetOpcode::G_TRUNC:
     MI.setDesc(TII.get(TargetOpcode::COPY));
     return true;
   case TargetOpcode::G_CONSTANT:
@@ -129,7 +142,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
 const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
     LLT Ty, const RegisterBank &RB, bool GetAllRegSet) const {
   if (RB.getID() == RISCV::GPRRegBankID) {
-    if (Ty.getSizeInBits() == (STI.is64Bit() ? 64 : 32))
+    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
       return &RISCV::GPRRegClass;
   }
 

diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
new file mode 100644
index 00000000000000..bb74a59f05dfcc
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -0,0 +1,45 @@
+//===-- RISCVGISel.td - RISCV GlobalISel Patterns ----------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file contains patterns that are relevant to GlobalISel, including
+/// GIComplexOperandMatcher definitions for equivalent SelectionDAG
+/// ComplexPatterns.
+//
+//===----------------------------------------------------------------------===//
+
+include "RISCV.td"
+
+// FIXME: This is labelled as handling 's32', however the ComplexPattern it
+// refers to handles both i32 and i64 based on the HwMode. Currently this LLT
+// parameter appears to be ignored so this pattern works for both, however we
+// should add a LowLevelTypeByHwMode, and use that to define our XLenLLT instead
+// here.
+def s32ShiftMaskGI :
+    GIComplexOperandMatcher<s32, "selectS32ShiftMask">,
+    GIComplexPatternEquiv<shiftMaskXLen>;
+
+let Predicates = [IsRV64] in {
+def : Pat<(i32 (add GPR:$rs1, GPR:$rs2)), (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (sub GPR:$rs1, GPR:$rs2)), (SUBW GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(i32 (shl GPR:$rs1, (i32 GPR:$rs2))), (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (sra GPR:$rs1, (i32 GPR:$rs2))), (SRAW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (srl GPR:$rs1, (i32 GPR:$rs2))), (SRLW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtMOrZmmul, IsRV64] in {
+def : Pat<(i32 (mul GPR:$rs1, GPR:$rs2)), (MULW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(i32 (sdiv GPR:$rs1, GPR:$rs2)), (DIVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (srem GPR:$rs1, GPR:$rs2)), (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (udiv GPR:$rs1, GPR:$rs2)), (DIVUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (urem GPR:$rs1, GPR:$rs2)), (REMUW GPR:$rs1, GPR:$rs2)>;
+}

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
new file mode 100644
index 00000000000000..4530f017b8c2d8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -global-isel -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32IM
+; RUN: llc -mtriple=riscv64 -mattr=+m -global-isel -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64IM
+
+; Extends to 32 bits exhaustively tested for add only.
+
+define i8 @add_i8(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i8 %a, %b
+  ret i8 %0
+}
+
+define i32 @add_i8_signext_i32(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8_signext_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 24
+; RV32IM-NEXT:    slli a1, a1, 24
+; RV32IM-NEXT:    srai a1, a1, 24
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8_signext_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    li a2, 24
+; RV64IM-NEXT:    sllw a0, a0, a2
+; RV64IM-NEXT:    sraw a0, a0, a2
+; RV64IM-NEXT:    sllw a1, a1, a2
+; RV64IM-NEXT:    sraw a1, a1, a2
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sext i8 %a to i32
+  %1 = sext i8 %b to i32
+  %2 = add i32 %0, %1
+  ret i32 %2
+}
+
+define i32 @add_i8_zeroext_i32(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8_zeroext_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a0, a0, 255
+; RV32IM-NEXT:    andi a1, a1, 255
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8_zeroext_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a0, a0, 255
+; RV64IM-NEXT:    andi a1, a1, 255
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = zext i8 %a to i32
+  %1 = zext i8 %b to i32
+  %2 = add i32 %0, %1
+  ret i32 %2
+}
+
+; TODO: Handle G_IMPLICIT_DEF, which is needed to have i8 -> i64 extends working
+; on RV32.
+
+define i32 @add_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: add_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @sub_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sub_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sub a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sub_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    subw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @shl_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: shl_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sll a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: shl_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sllw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = shl i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @ashr_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: ashr_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sra a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: ashr_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sraw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = ashr i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @lshr_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: lshr_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    srl a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: lshr_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srlw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = lshr i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @and_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: and_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: and_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @or_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: or_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: or_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @xor_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: xor_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xor_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @mul_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: mul_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: mul_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = mul i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @sdiv_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sdiv_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    div a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sdiv i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @srem_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: srem_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    rem a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: srem_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = srem i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @udiv_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: udiv_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    divu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: udiv_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = udiv i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @urem_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: urem_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    remu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: urem_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = urem i32 %a, %b
+  ret i32 %0
+}
+
+define i64 @add_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: add_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a2
+; RV32IM-NEXT:    sltu a2, a0, a2
+; RV32IM-NEXT:    add a1, a1, a3
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    add a1, a1, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @sub_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: sub_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sub a4, a0, a2
+; RV32IM-NEXT:    sltu a0, a0, a2
+; RV32IM-NEXT:    sub a1, a1, a3
+; RV32IM-NEXT:    andi a0, a0, 1
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    mv a0, a4
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sub_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sub a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i64 %a, %b
+  ret i64 %0
+}
+
+; TODO: Handle G_SELECT, which is needed to have i64 shifts working on RV32.
+
+define i64 @and_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: and_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    and a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: and_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @or_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: or_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    or a0, a0, a2
+; RV32IM-NEXT:    or a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: or_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @xor_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: xor_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xor a0, a0, a2
+; RV32IM-NEXT:    xor a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xor_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @mul_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: mul_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    mul a4, a0, a2
+; RV32IM-NEXT:    mul a1, a1, a2
+; RV32IM-NEXT:    mul a3, a0, a3
+; RV32IM-NEXT:    mulhu a0, a0, a2
+; RV32IM-NEXT:    add a1, a1, a3
+; RV32IM-NEXT:    add a1, a1, a0
+; RV32IM-NEXT:    mv a0, a4
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: mul_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = mul i64 %a, %b
+  ret i64 %0
+}
+
+; TODO: Handle G_SDIV, G_SREM, G_UDIV, G_UREM for i64 on RV32. Likely will be
+; dispatched to a libcall?

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
new file mode 100644
index 00000000000000..09bf29fce4b057
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
@@ -0,0 +1,480 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv32 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            add_i8_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_signext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADD]], 24
+    ; RV32I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 24
+    ; RV32I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 24
+    %4:gprb(s32) = G_SHL %2, %3(s32)
+    %5:gprb(s32) = G_ASHR %4, %3(s32)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i8_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_zeroext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[ADD]], 255
+    ; RV32I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 255
+    %4:gprb(s32) = G_AND %2, %3
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_signext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADD]], 16
+    ; RV32I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 16
+    ; RV32I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 16
+    %4:gprb(s32) = G_SHL %2, %3(s32)
+    %5:gprb(s32) = G_ASHR %4, %3(s32)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_zeroext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 16
+    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], -1
+    ; RV32I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[ADD]], [[ADDI]]
+    ; RV32I-NEXT: $x10 = COPY [[AND]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 65535
+    %4:gprb(s32) = G_AND %2, %3
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[ADD]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: addi_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_ADD %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SUB]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SUB %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            shl_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: shl_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SLL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SHL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ashr_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: ashr_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SRA:%[0-9]+]]:gpr = SRA [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SRA]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ASHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            lshr_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: lshr_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SRL:%[0-9]+]]:gpr = SRL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SRL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_LSHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            and_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: and_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[AND]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_AND %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            andi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: andi_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_AND %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            or_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: or_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[OR:%[0-9]+]]:gpr = OR [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[OR]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_OR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ori_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: ori_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ORI:%[0-9]+]]:gpr = ORI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ORI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_OR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xor_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: xor_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[XOR]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_XOR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xori_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: xori_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[XORI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_XOR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: add_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[ADD]], [[COPY2]]
+    ; RV32I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[COPY1]], [[COPY3]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV32I-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD [[ADD1]], [[ANDI]]
+    ; RV32I-NEXT: $x10 = COPY [[ADD]]
+    ; RV32I-NEXT: $x11 = COPY [[ADD2]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_ADD %0, %2
+    %5:gprb(s32) = G_ICMP intpred(ult), %4(s32), %2
+    %6:gprb(s32) = G_ADD %1, %3
+    %7:gprb(s32) = G_CONSTANT i32 1
+    %8:gprb(s32) = G_AND %5, %7
+    %9:gprb(s32) = G_ADD %6, %8
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            sub_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: sub_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SUB1:%[0-9]+]]:gpr = SUB [[COPY1]], [[COPY3]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV32I-NEXT: [[SUB2:%[0-9]+]]:gpr = SUB [[SUB1]], [[ANDI]]
+    ; RV32I-NEXT: $x10 = COPY [[SUB]]
+    ; RV32I-NEXT: $x11 = COPY [[SUB2]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_SUB %0, %2
+    %5:gprb(s32) = G_ICMP intpred(ult), %0(s32), %2
+    %6:gprb(s32) = G_SUB %1, %3
+    %7:gprb(s32) = G_CONSTANT i32 1
+    %8:gprb(s32) = G_AND %5, %7
+    %9:gprb(s32) = G_SUB %6, %8
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
new file mode 100644
index 00000000000000..63f45cb42a4e0a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
@@ -0,0 +1,621 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv64 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            add_i8_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_zeroext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[ADDW]], 255
+    ; RV64I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_CONSTANT i64 255
+    %10:gprb(s64) = G_ANYEXT %8(s32)
+    %5:gprb(s64) = G_AND %10, %9
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_signext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADDW]], 48
+    ; RV64I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 48
+    ; RV64I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_ANYEXT %8(s32)
+    %11:gprb(s64) = G_CONSTANT i64 48
+    %10:gprb(s64) = G_SHL %9, %11(s64)
+    %5:gprb(s64) = G_ASHR %10, %11(s64)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_zeroext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 16
+    ; RV64I-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -1
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[ADDW]], [[ADDIW]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_CONSTANT i64 65535
+    %10:gprb(s64) = G_ANYEXT %8(s32)
+    %5:gprb(s64) = G_AND %10, %9
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[ADDW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ADD %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: addi_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1234
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[ADDI]]
+    ; RV64I-NEXT: $x10 = COPY [[ADDW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 1234
+    %3:gprb(s32) = G_ADD %1, %2
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SUBW:%[0-9]+]]:gpr = SUBW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SUBW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SUB %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            shl_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: shl_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SLLW:%[0-9]+]]:gpr = SLLW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SLLW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SHL %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ashr_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: ashr_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRAW:%[0-9]+]]:gpr = SRAW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRAW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ASHR %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            lshr_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: lshr_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRLW:%[0-9]+]]:gpr = SRLW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRLW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_LSHR %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: addi_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SUB %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            shl_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: shl_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SLL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SHL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ashr_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: ashr_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRA:%[0-9]+]]:gpr = SRA [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRA]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_ASHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            lshr_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: lshr_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRL:%[0-9]+]]:gpr = SRL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            and_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: and_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            andi_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: andi_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            or_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: or_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[OR:%[0-9]+]]:gpr = OR [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[OR]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ori_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: ori_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ORI:%[0-9]+]]:gpr = ORI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ORI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xor_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: xor_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[XOR]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xori_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: xori_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[XORI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: add_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[ADD]], [[COPY2]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[COPY1]], [[COPY3]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV64I-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD [[ADD1]], [[ANDI]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]]
+    ; RV64I-NEXT: $x11 = COPY [[ADD2]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_ADD %0, %2
+    %5:gprb(s64) = G_ICMP intpred(ult), %4(s64), %2
+    %6:gprb(s64) = G_ADD %1, %3
+    %7:gprb(s64) = G_CONSTANT i64 1
+    %8:gprb(s64) = G_AND %5, %7
+    %9:gprb(s64) = G_ADD %6, %8
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            sub_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: sub_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:gpr = SUB [[COPY1]], [[COPY3]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV64I-NEXT: [[SUB2:%[0-9]+]]:gpr = SUB [[SUB1]], [[ANDI]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]]
+    ; RV64I-NEXT: $x11 = COPY [[SUB2]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_SUB %0, %2
+    %5:gprb(s64) = G_ICMP intpred(ult), %0(s64), %2
+    %6:gprb(s64) = G_SUB %1, %3
+    %7:gprb(s64) = G_CONSTANT i64 1
+    %8:gprb(s64) = G_AND %5, %7
+    %9:gprb(s64) = G_SUB %6, %8
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
new file mode 100644
index 00000000000000..f36b8ea31c11ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
@@ -0,0 +1,164 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv32 -mattr=+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            mul_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: mul_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[MUL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_MUL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sdiv_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[DIV:%[0-9]+]]:gpr = DIV [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[DIV]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: srem_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[REM:%[0-9]+]]:gpr = REM [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[REM]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: udiv_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[DIVU:%[0-9]+]]:gpr = DIVU [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[DIVU]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_UDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: urem_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[REMU:%[0-9]+]]:gpr = REMU [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[REMU]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_UREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: mul_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[MUL1:%[0-9]+]]:gpr = MUL [[COPY1]], [[COPY2]]
+    ; RV32I-NEXT: [[MUL2:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY3]]
+    ; RV32I-NEXT: [[MULHU:%[0-9]+]]:gpr = MULHU [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[MUL1]], [[MUL2]]
+    ; RV32I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[ADD]], [[MULHU]]
+    ; RV32I-NEXT: $x10 = COPY [[MUL]]
+    ; RV32I-NEXT: $x11 = COPY [[ADD1]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_MUL %0, %2
+    %5:gprb(s32) = G_MUL %1, %2
+    %6:gprb(s32) = G_MUL %0, %3
+    %7:gprb(s32) = G_UMULH %0, %2
+    %8:gprb(s32) = G_ADD %5, %6
+    %9:gprb(s32) = G_ADD %8, %7
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir
new file mode 100644
index 00000000000000..3b2e04c78b5e8e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir
@@ -0,0 +1,299 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv64 -mattr=+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            mul_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[MULW:%[0-9]+]]:gpr = MULW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[MULW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_MUL %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVW:%[0-9]+]]:gpr = DIVW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SDIV %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMW:%[0-9]+]]:gpr = REMW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SREM %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVUW:%[0-9]+]]:gpr = DIVUW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVUW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_UDIV %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMUW:%[0-9]+]]:gpr = REMUW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMUW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_UREM %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[MUL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIV:%[0-9]+]]:gpr = DIV [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIV]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REM:%[0-9]+]]:gpr = REM [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REM]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVU:%[0-9]+]]:gpr = DIVU [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVU]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMU:%[0-9]+]]:gpr = REMU [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMU]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: mul_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[MUL1:%[0-9]+]]:gpr = MUL [[COPY1]], [[COPY2]]
+    ; RV64I-NEXT: [[MUL2:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY3]]
+    ; RV64I-NEXT: [[MULHU:%[0-9]+]]:gpr = MULHU [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[MUL1]], [[MUL2]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[ADD]], [[MULHU]]
+    ; RV64I-NEXT: $x10 = COPY [[MUL]]
+    ; RV64I-NEXT: $x11 = COPY [[ADD1]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_MUL %0, %2
+    %5:gprb(s64) = G_MUL %1, %2
+    %6:gprb(s64) = G_MUL %0, %3
+    %7:gprb(s64) = G_UMULH %0, %2
+    %8:gprb(s64) = G_ADD %5, %6
+    %9:gprb(s64) = G_ADD %8, %7
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
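
In the RV64 file, the i32 tests show the G_TRUNC / 32-bit op / G_ANYEXT triple
being selected directly to a single W instruction (MULW, DIVW, DIVUW, REMW,
REMUW). This is legal because the W instructions operate on the low 32 bits and
sign-extend the 32-bit result, while G_ANYEXT leaves the upper 32 bits of the
destination unconstrained. A rough C sketch of the MULW semantics relied on by
the mul_i32 test (illustrative helper, not part of the patch):

    #include <stdint.h>

    /* Semantics of RV64 MULW: multiply the low 32 bits of the operands and
       sign-extend the 32-bit product into the 64-bit destination. The unsigned
       multiply avoids signed-overflow UB in C; the low 32 result bits are the
       same either way. */
    static int64_t mulw_like(int64_t a, int64_t b) {
      int32_t prod = (int32_t)((uint32_t)a * (uint32_t)b);
      return (int64_t)prod;   /* sign-extended, which satisfies G_ANYEXT */
    }
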

More information about the llvm-commits mailing list