[llvm] ac182de - [RISCV][GlobalISel] Select ALU GPR instructions

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 15 15:49:53 PDT 2023


Author: Craig Topper
Date: 2023-09-15T15:49:38-07:00
New Revision: ac182deee828731d717ed062977bc8ed358e31b6

URL: https://github.com/llvm/llvm-project/commit/ac182deee828731d717ed062977bc8ed358e31b6
DIFF: https://github.com/llvm/llvm-project/commit/ac182deee828731d717ed062977bc8ed358e31b6.diff

LOG: [RISCV][GlobalISel] Select ALU GPR instructions

Some instruction selection patterns required for ALU GPR instructions have
already been automatically imported from existing TableGen descriptions -
this patch simply adds testing for them. The first of the GIComplexPatternEquiv
definitions required to select the shiftMaskXLen ComplexPattern has been added.

Some instructions require special handling because i32 is not a legal
type on RV64 in SelectionDAG, so we can't reuse the SelectionDAG patterns.

Co-authored-by: Lewis Revill <lewis.revill at embecosm.com>

Reviewed By: nitinjohnraj

Differential Revision: https://reviews.llvm.org/D76445

Added: 
    llvm/lib/Target/RISCV/RISCVGISel.td
    llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip-rv64.ll
    llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
    llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 60d81da57da0e1e..4111c8267566aba 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -7,7 +7,6 @@ tablegen(LLVM RISCVGenAsmWriter.inc -gen-asm-writer)
 tablegen(LLVM RISCVGenCompressInstEmitter.inc -gen-compress-inst-emitter)
 tablegen(LLVM RISCVGenDAGISel.inc -gen-dag-isel)
 tablegen(LLVM RISCVGenDisassemblerTables.inc -gen-disassembler)
-tablegen(LLVM RISCVGenGlobalISel.inc -gen-global-isel)
 tablegen(LLVM RISCVGenInstrInfo.inc -gen-instr-info)
 tablegen(LLVM RISCVGenMCCodeEmitter.inc -gen-emitter)
 tablegen(LLVM RISCVGenMCPseudoLowering.inc -gen-pseudo-lowering)
@@ -16,6 +15,9 @@ tablegen(LLVM RISCVGenRegisterInfo.inc -gen-register-info)
 tablegen(LLVM RISCVGenSearchableTables.inc -gen-searchable-tables)
 tablegen(LLVM RISCVGenSubtargetInfo.inc -gen-subtarget)
 
+set(LLVM_TARGET_DEFINITIONS RISCVGISel.td)
+tablegen(LLVM RISCVGenGlobalISel.inc -gen-global-isel)
+
 add_public_tablegen_target(RISCVCommonTableGen)
 
 add_llvm_target(RISCVCodeGen

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index f56b2184407f245..4c246d7de1da952 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -50,6 +50,15 @@ class RISCVInstructionSelector : public InstructionSelector {
   bool selectConstant(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
 
+  bool earlySelectShift(unsigned Opc, MachineInstr &I, MachineIRBuilder &MIB,
+                        const MachineRegisterInfo &MRI);
+
+  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
+
+  // Custom renderers for tablegen
+  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
+                    int OpIdx) const;
+
   const RISCVSubtarget &STI;
   const RISCVInstrInfo &TII;
   const RISCVRegisterInfo &TRI;
@@ -89,12 +98,43 @@ RISCVInstructionSelector::RISCVInstructionSelector(
 {
 }
 
+InstructionSelector::ComplexRendererFns
+RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
+  // TODO: Also check if we are seeing the result of an AND operation which
+  // could be bypassed since we only check the lower log2(xlen) bits.
+  return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
+}
+
+// Tablegen doesn't allow us to write SRLIW/SRAIW/SLLIW patterns because the
+// immediate Operand has type XLenVT. GlobalISel wants it to be i32.
+bool RISCVInstructionSelector::earlySelectShift(
+    unsigned Opc, MachineInstr &I, MachineIRBuilder &MIB,
+    const MachineRegisterInfo &MRI) {
+  if (!Subtarget->is64Bit())
+    return false;
+
+  LLT Ty = MRI.getType(I.getOperand(0).getReg());
+  if (!Ty.isScalar() || Ty.getSizeInBits() != 32)
+    return false;
+
+  std::optional<int64_t> CstVal =
+      getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI);
+  if (!CstVal || !isUInt<5>(*CstVal))
+    return false;
+
+  auto NewI = MIB.buildInstr(Opc, {I.getOperand(0).getReg()},
+                             {I.getOperand(1).getReg()})
+                  .addImm(*CstVal);
+  I.eraseFromParent();
+  return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
+}
+
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   unsigned Opc = MI.getOpcode();
   MachineBasicBlock &MBB = *MI.getParent();
   MachineFunction &MF = *MBB.getParent();
   MachineRegisterInfo &MRI = MF.getRegInfo();
-  MachineIRBuilder MIB(MF);
+  MachineIRBuilder MIB(MI);
 
   if (!isPreISelGenericOpcode(Opc)) {
     // Certain non-generic instructions also need some special handling.
@@ -104,13 +144,61 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
 
+  switch (Opc) {
+  case TargetOpcode::G_ADD: {
+    // Tablegen doesn't pick up the ADDIW pattern because i32 isn't a legal
+    // type for RV64 in SelectionDAG. Manually select it here.
+    LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+    if (Subtarget->is64Bit() && Ty.isScalar() && Ty.getSizeInBits() == 32) {
+      std::optional<int64_t> CstVal =
+          getIConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
+      if (CstVal && isInt<12>(*CstVal)) {
+        auto NewI = MIB.buildInstr(RISCV::ADDIW, {MI.getOperand(0).getReg()},
+                                   {MI.getOperand(1).getReg()})
+                        .addImm(*CstVal);
+        MI.eraseFromParent();
+        return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
+      }
+    }
+    break;
+  }
+  case TargetOpcode::G_SUB: {
+    // Tablegen doesn't pick up the ADDIW pattern because i32 isn't a legal
+    // type for RV64 in SelectionDAG. Manually select it here.
+    LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+    if (Subtarget->is64Bit() && Ty.isScalar() && Ty.getSizeInBits() == 32) {
+      std::optional<int64_t> CstVal =
+          getIConstantVRegSExtVal(MI.getOperand(2).getReg(), MRI);
+      if (CstVal && ((isInt<12>(*CstVal) && *CstVal != -2048) || *CstVal == 2048)) {
+        auto NewI = MIB.buildInstr(RISCV::ADDIW, {MI.getOperand(0).getReg()},
+                                   {MI.getOperand(1).getReg()})
+                        .addImm(-*CstVal);
+        MI.eraseFromParent();
+        return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
+      }
+    }
+    break;
+  }
+  case TargetOpcode::G_ASHR:
+    if (earlySelectShift(RISCV::SRAIW, MI, MIB, MRI))
+      return true;
+    break;
+  case TargetOpcode::G_LSHR:
+    if (earlySelectShift(RISCV::SRLIW, MI, MIB, MRI))
+      return true;
+    break;
+  case TargetOpcode::G_SHL:
+    if (earlySelectShift(RISCV::SLLIW, MI, MIB, MRI))
+      return true;
+    break;
+  }
+
   if (selectImpl(MI, *CoverageInfo))
     return true;
 
-  MIB.setInstrAndDebugLoc(MI);
-
   switch (Opc) {
   case TargetOpcode::G_ANYEXT:
+  case TargetOpcode::G_TRUNC:
     MI.setDesc(TII.get(TargetOpcode::COPY));
     return true;
   case TargetOpcode::G_CONSTANT:
@@ -126,10 +214,19 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   return true;
 }
 
+void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
+                                            const MachineInstr &MI,
+                                            int OpIdx) const {
+  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
+         "Expected G_CONSTANT");
+  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
+  MIB.addImm(-CstVal);
+}
+
 const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
     LLT Ty, const RegisterBank &RB, bool GetAllRegSet) const {
   if (RB.getID() == RISCV::GPRRegBankID) {
-    if (Ty.getSizeInBits() == (STI.is64Bit() ? 64 : 32))
+    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
       return &RISCV::GPRRegClass;
   }
 

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index ea738daeb924574..1f9b80856bbe134 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -47,7 +47,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
       .lowerFor({{XLenLLT, s1}});
 
   getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
-      .legalFor({{s32, s32}, {s32, XLenLLT}, {XLenLLT, XLenLLT}})
+      .legalFor({{s32, s32}, {XLenLLT, XLenLLT}})
       .widenScalarToNextPow2(0)
       .clampScalar(1, s32, XLenLLT)
       .clampScalar(0, s32, XLenLLT);

diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
new file mode 100644
index 000000000000000..a993a7e66368390
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -0,0 +1,55 @@
+//===-- RISCVGIsel.td - RISCV GlobalISel Patterns ----------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file contains patterns that are relevant to GlobalISel, including
+/// GIComplexOperandMatcher definitions for equivalent SelectionDAG
+/// ComplexPatterns.
+//
+//===----------------------------------------------------------------------===//
+
+include "RISCV.td"
+
+def simm12Plus1 : ImmLeaf<XLenVT, [{
+    return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
+
+def GINegImm : GICustomOperandRenderer<"renderNegImm">,
+  GISDNodeXFormEquiv<NegImm>;
+
+// FIXME: This is labelled as handling 's32', however the ComplexPattern it
+// refers to handles both i32 and i64 based on the HwMode. Currently this LLT
+// parameter appears to be ignored so this pattern works for both, however we
+// should add a LowLevelTypeByHwMode, and use that to define our XLenLLT instead
+// here.
+def ShiftMaskGI :
+    GIComplexOperandMatcher<s32, "selectShiftMask">,
+    GIComplexPatternEquiv<shiftMaskXLen>;
+
+// FIXME: Canonicalize (sub X, C) -> (add X, -C) earlier.
+def : Pat<(XLenVT (sub GPR:$rs1, simm12Plus1:$imm)),
+          (ADDI GPR:$rs1, (NegImm simm12Plus1:$imm))>;
+
+let Predicates = [IsRV64] in {
+def : Pat<(i32 (add GPR:$rs1, GPR:$rs2)), (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (sub GPR:$rs1, GPR:$rs2)), (SUBW GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(i32 (shl GPR:$rs1, (i32 GPR:$rs2))), (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (sra GPR:$rs1, (i32 GPR:$rs2))), (SRAW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (srl GPR:$rs1, (i32 GPR:$rs2))), (SRLW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtMOrZmmul, IsRV64] in {
+def : Pat<(i32 (mul GPR:$rs1, GPR:$rs2)), (MULW GPR:$rs1, GPR:$rs2)>;
+}
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(i32 (sdiv GPR:$rs1, GPR:$rs2)), (DIVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (srem GPR:$rs1, GPR:$rs2)), (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (udiv GPR:$rs1, GPR:$rs2)), (DIVUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(i32 (urem GPR:$rs1, GPR:$rs2)), (REMUW GPR:$rs1, GPR:$rs2)>;
+}

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip-rv64.ll
new file mode 100644
index 000000000000000..d4acca17930d5a0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip-rv64.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -global-isel -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64IM
+
+define i64 @sll_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: sll_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sll a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = shl i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @slli_i64(i64 %a) {
+; RV64IM-LABEL: slli_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a0, a0, 33
+; RV64IM-NEXT:    ret
+entry:
+  %0 = shl i64 %a, 33
+  ret i64 %0
+}
+
+define i64 @sra_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: sra_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sra a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = ashr i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @srai_i64(i64 %a) {
+; RV64IM-LABEL: srai_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srai a0, a0, 47
+; RV64IM-NEXT:    ret
+entry:
+  %0 = ashr i64 %a, 47
+  ret i64 %0
+}
+
+define i64 @srl_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: srl_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srl a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = lshr i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @srli_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: srli_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srli a0, a0, 55
+; RV64IM-NEXT:    ret
+entry:
+  %0 = lshr i64 %a, 55
+  ret i64 %0
+}
+
+define i64 @sdiv_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: sdiv_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    div a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sdiv i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @srem_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: srem_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    rem a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = srem i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @udiv_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: udiv_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    divu a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = udiv i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @urem_i64(i64 %a, i64 %b) {
+; RV64IM-LABEL: urem_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    remu a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = urem i64 %a, %b
+  ret i64 %0
+}

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
new file mode 100644
index 000000000000000..9557f581be4ecec
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -0,0 +1,590 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -global-isel -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32IM
+; RUN: llc -mtriple=riscv64 -mattr=+m -global-isel -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64IM
+
+; Extends to 32 bits exhaustively tested for add only.
+
+define i8 @add_i8(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i8 %a, %b
+  ret i8 %0
+}
+
+define i32 @add_i8_signext_i32(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8_signext_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a0, a0, 24
+; RV32IM-NEXT:    srai a0, a0, 24
+; RV32IM-NEXT:    slli a1, a1, 24
+; RV32IM-NEXT:    srai a1, a1, 24
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8_signext_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slli a0, a0, 24
+; RV64IM-NEXT:    sraiw a0, a0, 24
+; RV64IM-NEXT:    slli a1, a1, 24
+; RV64IM-NEXT:    sraiw a1, a1, 24
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sext i8 %a to i32
+  %1 = sext i8 %b to i32
+  %2 = add i32 %0, %1
+  ret i32 %2
+}
+
+define i32 @add_i8_zeroext_i32(i8 %a, i8 %b) {
+; RV32IM-LABEL: add_i8_zeroext_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a0, a0, 255
+; RV32IM-NEXT:    andi a1, a1, 255
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i8_zeroext_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a0, a0, 255
+; RV64IM-NEXT:    andi a1, a1, 255
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = zext i8 %a to i32
+  %1 = zext i8 %b to i32
+  %2 = add i32 %0, %1
+  ret i32 %2
+}
+
+; TODO: Handle G_IMPLICIT_DEF, which is needed to have i8 -> i64 extends working
+; on RV32.
+
+define i32 @add_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: add_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @addi_i32(i32 %a) {
+; RV32IM-LABEL: addi_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, 1234
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: addi_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addiw a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i32 %a, 1234
+  ret i32 %0
+}
+
+define i32 @sub_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sub_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sub a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sub_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    subw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @subi_i32(i32 %a) {
+; RV32IM-LABEL: subi_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1234
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: subi_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addiw a0, a0, -1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i32 %a, 1234
+  ret i32 %0
+}
+
+define i32 @sll_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sll_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sll a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sll_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sllw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = shl i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @slli_i32(i32 %a) {
+; RV32IM-LABEL: slli_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a0, a0, 11
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: slli_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    slliw a0, a0, 11
+; RV64IM-NEXT:    ret
+entry:
+  %0 = shl i32 %a, 11
+  ret i32 %0
+}
+
+define i32 @sra_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sra_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sra a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sra_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sraw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = ashr i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @srai_i32(i32 %a) {
+; RV32IM-LABEL: srai_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    srai a0, a0, 17
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: srai_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sraiw a0, a0, 17
+; RV64IM-NEXT:    ret
+entry:
+  %0 = ashr i32 %a, 17
+  ret i32 %0
+}
+
+define i32 @srl_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: srl_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    srl a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: srl_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srlw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = lshr i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @srli_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: srli_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    srli a0, a0, 23
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: srli_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    srliw a0, a0, 23
+; RV64IM-NEXT:    ret
+entry:
+  %0 = lshr i32 %a, 23
+  ret i32 %0
+}
+
+define i32 @and_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: and_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: and_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @andi_i32(i32 %a) {
+; RV32IM-LABEL: andi_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a0, a0, 1234
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: andi_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i32 %a, 1234
+  ret i32 %0
+}
+
+define i32 @or_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: or_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: or_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @ori_i32(i32 %a) {
+; RV32IM-LABEL: ori_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    ori a0, a0, 1234
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: ori_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    ori a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i32 %a, 1234
+  ret i32 %0
+}
+
+define i32 @xor_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: xor_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xor_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @xori_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: xori_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xori a0, a0, 1234
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xori_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xori a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i32 %a, 1234
+  ret i32 %0
+}
+
+define i32 @mul_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: mul_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: mul_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = mul i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @sdiv_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: sdiv_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    div a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sdiv_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    divw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sdiv i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @srem_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: srem_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    rem a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: srem_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    remw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = srem i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @udiv_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: udiv_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    divu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: udiv_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    divuw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = udiv i32 %a, %b
+  ret i32 %0
+}
+
+define i32 @urem_i32(i32 %a, i32 %b) {
+; RV32IM-LABEL: urem_i32:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    remu a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: urem_i32:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    remuw a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = urem i32 %a, %b
+  ret i32 %0
+}
+
+define i64 @add_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: add_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    add a0, a0, a2
+; RV32IM-NEXT:    sltu a2, a0, a2
+; RV32IM-NEXT:    add a1, a1, a3
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    add a1, a1, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: add_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @addi_i64(i64 %a) {
+; RV32IM-LABEL: addi_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, 1234
+; RV32IM-NEXT:    sltiu a2, a0, 1234
+; RV32IM-NEXT:    mv a1, a1
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    add a1, a1, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: addi_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    addi a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = add i64 %a, 1234
+  ret i64 %0
+}
+
+define i64 @sub_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: sub_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    sub a4, a0, a2
+; RV32IM-NEXT:    sltu a0, a0, a2
+; RV32IM-NEXT:    sub a1, a1, a3
+; RV32IM-NEXT:    andi a0, a0, 1
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    mv a0, a4
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: sub_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sub a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @subi_i64(i64 %a) {
+; RV32IM-LABEL: subi_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    lui a2, 301
+; RV32IM-NEXT:    addi a3, a2, 1548
+; RV32IM-NEXT:    sub a2, a0, a3
+; RV32IM-NEXT:    sltu a0, a0, a3
+; RV32IM-NEXT:    mv a1, a1
+; RV32IM-NEXT:    andi a0, a0, 1
+; RV32IM-NEXT:    sub a1, a1, a0
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: subi_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    lui a1, 301
+; RV64IM-NEXT:    addiw a1, a1, 1548
+; RV64IM-NEXT:    sub a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = sub i64 %a, 1234444
+  ret i64 %0
+}
+
+; TODO: Handle G_SELECT, which is needed to have i64 shifts working on RV32.
+
+define i64 @and_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: and_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    and a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: and_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @andi_i64(i64 %a) {
+; RV32IM-LABEL: andi_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a0, a0, 1234
+; RV32IM-NEXT:    andi a1, a1, 0
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: andi_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    andi a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = and i64 %a, 1234
+  ret i64 %0
+}
+
+define i64 @or_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: or_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    or a0, a0, a2
+; RV32IM-NEXT:    or a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: or_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @ori_i64(i64 %a) {
+; RV32IM-LABEL: ori_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    ori a0, a0, 1234
+; RV32IM-NEXT:    ori a1, a1, 0
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: ori_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    ori a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = or i64 %a, 1234
+  ret i64 %0
+}
+
+define i64 @xor_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: xor_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xor a0, a0, a2
+; RV32IM-NEXT:    xor a1, a1, a3
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xor_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i64 %a, %b
+  ret i64 %0
+}
+
+define i64 @xori_i64(i64 %a) {
+; RV32IM-LABEL: xori_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    xori a0, a0, 1234
+; RV32IM-NEXT:    xori a1, a1, 0
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: xori_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    xori a0, a0, 1234
+; RV64IM-NEXT:    ret
+entry:
+  %0 = xor i64 %a, 1234
+  ret i64 %0
+}
+
+define i64 @mul_i64(i64 %a, i64 %b) {
+; RV32IM-LABEL: mul_i64:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    mul a4, a0, a2
+; RV32IM-NEXT:    mul a1, a1, a2
+; RV32IM-NEXT:    mul a3, a0, a3
+; RV32IM-NEXT:    mulhu a0, a0, a2
+; RV32IM-NEXT:    add a1, a1, a3
+; RV32IM-NEXT:    add a1, a1, a0
+; RV32IM-NEXT:    mv a0, a4
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: mul_i64:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+entry:
+  %0 = mul i64 %a, %b
+  ret i64 %0
+}
+
+; TODO: Handle G_SDIV, G_SREM, G_UDIV, G_UREM for i64 on RV32. Likely will be
+; dispatched to a libcall?

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
new file mode 100644
index 000000000000000..c063e5a97c394ec
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv32.mir
@@ -0,0 +1,572 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv32 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            add_i8_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_signext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADD]], 24
+    ; RV32I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 24
+    ; RV32I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 24
+    %4:gprb(s32) = G_SHL %2, %3(s32)
+    %5:gprb(s32) = G_ASHR %4, %3(s32)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i8_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_zeroext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[ADD]], 255
+    ; RV32I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 255
+    %4:gprb(s32) = G_AND %2, %3
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_signext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADD]], 16
+    ; RV32I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 16
+    ; RV32I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 16
+    %4:gprb(s32) = G_SHL %2, %3(s32)
+    %5:gprb(s32) = G_ASHR %4, %3(s32)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_zeroext
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 16
+    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[LUI]], -1
+    ; RV32I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[ADD]], [[ADDI]]
+    ; RV32I-NEXT: $x10 = COPY [[AND]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    %3:gprb(s32) = G_CONSTANT i32 65535
+    %4:gprb(s32) = G_AND %2, %3
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[ADD]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ADD %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: addi_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_ADD %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SUB]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SUB %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            subi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: subi_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 -1234
+    %2:gprb(s32) = G_SUB %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sll_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sll_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SLL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SHL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            slli_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: slli_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 31
+    ; RV32I-NEXT: $x10 = COPY [[SLLI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 31
+    %2:gprb(s32) = G_SHL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sra_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sra_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SRA:%[0-9]+]]:gpr = SRA [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SRA]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_ASHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srai_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: srai_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[COPY]], 31
+    ; RV32I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 31
+    %2:gprb(s32) = G_ASHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srl_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: srl_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[SRL:%[0-9]+]]:gpr = SRL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[SRL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_LSHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srli_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: srli_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[COPY]], 31
+    ; RV32I-NEXT: $x10 = COPY [[SRLI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 31
+    %2:gprb(s32) = G_LSHR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            and_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: and_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[AND]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_AND %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            andi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: andi_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_AND %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            or_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: or_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[OR:%[0-9]+]]:gpr = OR [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[OR]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_OR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ori_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: ori_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[ORI:%[0-9]+]]:gpr = ORI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[ORI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_OR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xor_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: xor_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[XOR]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_XOR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xori_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV32I-LABEL: name: xori_i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], 1234
+    ; RV32I-NEXT: $x10 = COPY [[XORI]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1234
+    %2:gprb(s32) = G_XOR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: add_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[ADD]], [[COPY2]]
+    ; RV32I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[COPY1]], [[COPY3]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV32I-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD [[ADD1]], [[ANDI]]
+    ; RV32I-NEXT: $x10 = COPY [[ADD]]
+    ; RV32I-NEXT: $x11 = COPY [[ADD2]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_ADD %0, %2
+    %5:gprb(s32) = G_ICMP intpred(ult), %4(s32), %2
+    %6:gprb(s32) = G_ADD %1, %3
+    %7:gprb(s32) = G_CONSTANT i32 1
+    %8:gprb(s32) = G_AND %5, %7
+    %9:gprb(s32) = G_ADD %6, %8
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            sub_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: sub_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[SUB1:%[0-9]+]]:gpr = SUB [[COPY1]], [[COPY3]]
+    ; RV32I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV32I-NEXT: [[SUB2:%[0-9]+]]:gpr = SUB [[SUB1]], [[ANDI]]
+    ; RV32I-NEXT: $x10 = COPY [[SUB]]
+    ; RV32I-NEXT: $x11 = COPY [[SUB2]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_SUB %0, %2
+    %5:gprb(s32) = G_ICMP intpred(ult), %0(s32), %2
+    %6:gprb(s32) = G_SUB %1, %3
+    %7:gprb(s32) = G_CONSTANT i32 1
+    %8:gprb(s32) = G_AND %5, %7
+    %9:gprb(s32) = G_SUB %6, %8
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
new file mode 100644
index 000000000000000..84cc55f422a6a0c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu-rv64.mir
@@ -0,0 +1,812 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv64 -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            add_i8_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_zeroext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[ADDW]], 255
+    ; RV64I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_CONSTANT i64 255
+    %10:gprb(s64) = G_ANYEXT %8(s32)
+    %5:gprb(s64) = G_AND %10, %9
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_signext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_signext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[ADDW]], 48
+    ; RV64I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[SLLI]], 48
+    ; RV64I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_ANYEXT %8(s32)
+    %11:gprb(s64) = G_CONSTANT i64 48
+    %10:gprb(s64) = G_SHL %9, %11(s64)
+    %5:gprb(s64) = G_ASHR %10, %11(s64)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i16_zeroext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_zeroext
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 16
+    ; RV64I-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -1
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[ADDW]], [[ADDIW]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %2:gprb(s64) = COPY $x10
+    %3:gprb(s64) = COPY $x11
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %7:gprb(s32) = G_TRUNC %3(s64)
+    %8:gprb(s32) = G_ADD %6, %7
+    %9:gprb(s64) = G_CONSTANT i64 65535
+    %10:gprb(s64) = G_ANYEXT %8(s32)
+    %5:gprb(s64) = G_AND %10, %9
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[ADDW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ADD %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: addi_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ADDIW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 1234
+    %3:gprb(s32) = G_ADD %1, %2
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SUBW:%[0-9]+]]:gpr = SUBW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SUBW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SUB %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            subi_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: subi_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ADDIW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 -1234
+    %3:gprb(s32) = G_SUB %1, %2
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sll_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sll_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SLLW:%[0-9]+]]:gpr = SLLW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SLLW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SHL %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            slli_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: slli_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SLLIW:%[0-9]+]]:gpr = SLLIW [[COPY]], 31
+    ; RV64I-NEXT: $x10 = COPY [[SLLIW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_SHL %1, %2(s32)
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sra_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sra_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRAW:%[0-9]+]]:gpr = SRAW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRAW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ASHR %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srai_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: srai_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SRAIW:%[0-9]+]]:gpr = SRAIW [[COPY]], 31
+    ; RV64I-NEXT: $x10 = COPY [[SRAIW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_ASHR %1, %2(s32)
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srl_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srl_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRLW:%[0-9]+]]:gpr = SRLW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRLW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_LSHR %1, %3(s32)
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srli_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: srli_i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SRLIW:%[0-9]+]]:gpr = SRLIW [[COPY]], 31
+    ; RV64I-NEXT: $x10 = COPY [[SRLIW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_LSHR %1, %2(s32)
+    %4:gprb(s64) = G_ANYEXT %3(s32)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            addi_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: addi_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sub_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SUB %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            subi_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: subi_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ADDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 -1234
+    %2:gprb(s64) = G_SUB %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sll_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sll_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SLL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SHL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            slli_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: slli_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 63
+    ; RV64I-NEXT: $x10 = COPY [[SLLI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 63
+    %2:gprb(s64) = G_SHL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sra_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sra_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRA:%[0-9]+]]:gpr = SRA [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRA]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_ASHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srai_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: srai_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SRAI:%[0-9]+]]:gpr = SRAI [[COPY]], 63
+    ; RV64I-NEXT: $x10 = COPY [[SRAI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 63
+    %2:gprb(s64) = G_ASHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            lshr_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: lshr_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[SRL:%[0-9]+]]:gpr = SRL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[SRL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srli_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: srli_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[COPY]], 63
+    ; RV64I-NEXT: $x10 = COPY [[SRLI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 63
+    %2:gprb(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            and_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: and_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:gpr = AND [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            andi_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: andi_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ANDI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            or_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: or_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[OR:%[0-9]+]]:gpr = OR [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[OR]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            ori_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: ori_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[ORI:%[0-9]+]]:gpr = ORI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[ORI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xor_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: xor_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:gpr = XOR [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[XOR]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            xori_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10
+
+    ; RV64I-LABEL: name: xori_i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY]], 1234
+    ; RV64I-NEXT: $x10 = COPY [[XORI]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = G_CONSTANT i64 1234
+    %2:gprb(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            add_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: add_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[ADD]], [[COPY2]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[COPY1]], [[COPY3]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV64I-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD [[ADD1]], [[ANDI]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]]
+    ; RV64I-NEXT: $x11 = COPY [[ADD2]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_ADD %0, %2
+    %5:gprb(s64) = G_ICMP intpred(ult), %4(s64), %2
+    %6:gprb(s64) = G_ADD %1, %3
+    %7:gprb(s64) = G_CONSTANT i64 1
+    %8:gprb(s64) = G_AND %5, %7
+    %9:gprb(s64) = G_ADD %6, %8
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            sub_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: sub_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SLTU:%[0-9]+]]:gpr = SLTU [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[SUB1:%[0-9]+]]:gpr = SUB [[COPY1]], [[COPY3]]
+    ; RV64I-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[SLTU]], 1
+    ; RV64I-NEXT: [[SUB2:%[0-9]+]]:gpr = SUB [[SUB1]], [[ANDI]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]]
+    ; RV64I-NEXT: $x11 = COPY [[SUB2]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_SUB %0, %2
+    %5:gprb(s64) = G_ICMP intpred(ult), %0(s64), %2
+    %6:gprb(s64) = G_SUB %1, %3
+    %7:gprb(s64) = G_CONSTANT i64 1
+    %8:gprb(s64) = G_AND %5, %7
+    %9:gprb(s64) = G_SUB %6, %8
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
new file mode 100644
index 000000000000000..f36b8ea31c11eec
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv32.mir
@@ -0,0 +1,164 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv32 -mattr=+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+---
+name:            mul_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: mul_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[MUL]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_MUL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sdiv_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[DIV:%[0-9]+]]:gpr = DIV [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[DIV]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: srem_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[REM:%[0-9]+]]:gpr = REM [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[REM]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_SREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: udiv_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[DIVU:%[0-9]+]]:gpr = DIVU [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[DIVU]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_UDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: urem_i32
+    ; RV32I: liveins: $x10, $x11
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[REMU:%[0-9]+]]:gpr = REMU [[COPY]], [[COPY1]]
+    ; RV32I-NEXT: $x10 = COPY [[REMU]]
+    ; RV32I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_UREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV32I-LABEL: name: mul_i64
+    ; RV32I: liveins: $x10, $x11, $x12, $x13
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV32I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[MUL1:%[0-9]+]]:gpr = MUL [[COPY1]], [[COPY2]]
+    ; RV32I-NEXT: [[MUL2:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY3]]
+    ; RV32I-NEXT: [[MULHU:%[0-9]+]]:gpr = MULHU [[COPY]], [[COPY2]]
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[MUL1]], [[MUL2]]
+    ; RV32I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[ADD]], [[MULHU]]
+    ; RV32I-NEXT: $x10 = COPY [[MUL]]
+    ; RV32I-NEXT: $x11 = COPY [[ADD1]]
+    ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = COPY $x12
+    %3:gprb(s32) = COPY $x13
+    %4:gprb(s32) = G_MUL %0, %2
+    %5:gprb(s32) = G_MUL %1, %2
+    %6:gprb(s32) = G_MUL %0, %3
+    %7:gprb(s32) = G_UMULH %0, %2
+    %8:gprb(s32) = G_ADD %5, %6
+    %9:gprb(s32) = G_ADD %8, %7
+    $x10 = COPY %4(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...

diff  --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir
new file mode 100644
index 000000000000000..3b2e04c78b5e8eb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/alu_m-rv64.mir
@@ -0,0 +1,299 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=riscv64 -mattr=+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+---
+name:            mul_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[MULW:%[0-9]+]]:gpr = MULW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[MULW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_MUL %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVW:%[0-9]+]]:gpr = DIVW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SDIV %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMW:%[0-9]+]]:gpr = REMW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_SREM %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVUW:%[0-9]+]]:gpr = DIVUW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVUW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_UDIV %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i32
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMUW:%[0-9]+]]:gpr = REMUW [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMUW]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_TRUNC %0(s64)
+    %2:gprb(s64) = COPY $x11
+    %3:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_UREM %1, %3
+    %5:gprb(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[MUL]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            sdiv_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIV:%[0-9]+]]:gpr = DIV [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIV]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            srem_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REM:%[0-9]+]]:gpr = REM [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REM]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            udiv_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[DIVU:%[0-9]+]]:gpr = DIVU [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[DIVU]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            urem_i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[REMU:%[0-9]+]]:gpr = REMU [[COPY]], [[COPY1]]
+    ; RV64I-NEXT: $x10 = COPY [[REMU]]
+    ; RV64I-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name:            mul_i128
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: mul_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x12
+    ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[MUL1:%[0-9]+]]:gpr = MUL [[COPY1]], [[COPY2]]
+    ; RV64I-NEXT: [[MUL2:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY3]]
+    ; RV64I-NEXT: [[MULHU:%[0-9]+]]:gpr = MULHU [[COPY]], [[COPY2]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[MUL1]], [[MUL2]]
+    ; RV64I-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD [[ADD]], [[MULHU]]
+    ; RV64I-NEXT: $x10 = COPY [[MUL]]
+    ; RV64I-NEXT: $x11 = COPY [[ADD1]]
+    ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = COPY $x12
+    %3:gprb(s64) = COPY $x13
+    %4:gprb(s64) = G_MUL %0, %2
+    %5:gprb(s64) = G_MUL %1, %2
+    %6:gprb(s64) = G_MUL %0, %3
+    %7:gprb(s64) = G_UMULH %0, %2
+    %8:gprb(s64) = G_ADD %5, %6
+    %9:gprb(s64) = G_ADD %8, %7
+    $x10 = COPY %4(s64)
+    $x11 = COPY %9(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...


        


More information about the llvm-commits mailing list