[llvm] 1d7f79c - [MIPS GlobalISel] MSA vector generic and builtin sdiv, srem, udiv, urem

Petar Avramovic via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 24 01:06:49 PDT 2019


Author: Petar Avramovic
Date: 2019-10-24T10:03:36+02:00
New Revision: 1d7f79c0171df921e5519c7f19a94d9a53c7d248

URL: https://github.com/llvm/llvm-project/commit/1d7f79c0171df921e5519c7f19a94d9a53c7d248
DIFF: https://github.com/llvm/llvm-project/commit/1d7f79c0171df921e5519c7f19a94d9a53c7d248.diff

LOG: [MIPS GlobalISel] MSA vector generic and builtin sdiv, srem, udiv, urem

Select vector G_SDIV, G_SREM, G_UDIV and G_UREM for MIPS32 with MSA. We
have to set bank for vector operands to fprb and selectImpl will do the
rest. __builtin_msa_div_s_<format>, __builtin_msa_mod_s_<format>,
__builtin_msa_div_u_<format> and __builtin_msa_mod_u_<format> will be
transformed into G_SDIV, G_SREM, G_UDIV and G_UREM in legalizeIntrinsic
respectively and selected in the same way.

Differential Revision: https://reviews.llvm.org/D69333

Added: 
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir

Modified: 
    llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
    llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 3c931d77e941..e44f5e7d1ce9 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -141,8 +141,14 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
       .legalFor({s32})
       .clampScalar(0, s32, s32);
 
-  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UREM, G_UDIV})
-      .legalFor({s32})
+  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
+      .legalIf([=, &ST](const LegalityQuery &Query) {
+        if (CheckTyN(0, Query, {s32}))
+          return true;
+        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
+          return true;
+        return false;
+      })
       .minScalar(0, s32)
       .libcallFor({s64});
 
@@ -378,6 +384,26 @@ bool MipsLegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
   case Intrinsic::mips_mulv_w:
   case Intrinsic::mips_mulv_d:
     return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
+  case Intrinsic::mips_div_s_b:
+  case Intrinsic::mips_div_s_h:
+  case Intrinsic::mips_div_s_w:
+  case Intrinsic::mips_div_s_d:
+    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
+  case Intrinsic::mips_mod_s_b:
+  case Intrinsic::mips_mod_s_h:
+  case Intrinsic::mips_mod_s_w:
+  case Intrinsic::mips_mod_s_d:
+    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
+  case Intrinsic::mips_div_u_b:
+  case Intrinsic::mips_div_u_h:
+  case Intrinsic::mips_div_u_w:
+  case Intrinsic::mips_div_u_d:
+    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
+  case Intrinsic::mips_mod_u_b:
+  case Intrinsic::mips_mod_u_h:
+  case Intrinsic::mips_mod_u_w:
+  case Intrinsic::mips_mod_u_d:
+    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
   default:
     break;
   }

diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index f8531f888568..c8700c86373d 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -449,10 +449,6 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case G_SHL:
   case G_ASHR:
   case G_LSHR:
-  case G_SDIV:
-  case G_UDIV:
-  case G_SREM:
-  case G_UREM:
   case G_BRINDIRECT:
   case G_VASTART:
     OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
@@ -460,6 +456,10 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case G_ADD:
   case G_SUB:
   case G_MUL:
+  case G_SDIV:
+  case G_SREM:
+  case G_UDIV:
+  case G_UREM:
     OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx];
     if (Op0Size == 128)
       OperandsMapping = getMSAMapping(MF);

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
new file mode 100644
index 000000000000..14abf0af763d
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
@@ -0,0 +1,505 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
+--- |
+
+  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+...
+---
+name:            sdiv_v16i8
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_B:%[0-9]+]]:msa128b = LD_B [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_B1:%[0-9]+]]:msa128b = LD_B [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_S_B:%[0-9]+]]:msa128b = DIV_S_B [[LD_B]], [[LD_B1]]
+    ; P5600: ST_B [[DIV_S_B]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<16 x s8>) = G_SDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v8i16
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_H:%[0-9]+]]:msa128h = LD_H [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_H1:%[0-9]+]]:msa128h = LD_H [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_S_H:%[0-9]+]]:msa128h = DIV_S_H [[LD_H]], [[LD_H1]]
+    ; P5600: ST_H [[DIV_S_H]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<8 x s16>) = G_SDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v4i32
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_W:%[0-9]+]]:msa128w = LD_W [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_W1:%[0-9]+]]:msa128w = LD_W [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_S_W:%[0-9]+]]:msa128w = DIV_S_W [[LD_W]], [[LD_W1]]
+    ; P5600: ST_W [[DIV_S_W]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<4 x s32>) = G_SDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v2i64
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_D:%[0-9]+]]:msa128d = LD_D [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_D1:%[0-9]+]]:msa128d = LD_D [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_S_D:%[0-9]+]]:msa128d = DIV_S_D [[LD_D]], [[LD_D1]]
+    ; P5600: ST_D [[DIV_S_D]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<2 x s64>) = G_SDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v16i8
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_B:%[0-9]+]]:msa128b = LD_B [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_B1:%[0-9]+]]:msa128b = LD_B [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_S_B:%[0-9]+]]:msa128b = MOD_S_B [[LD_B]], [[LD_B1]]
+    ; P5600: ST_B [[MOD_S_B]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<16 x s8>) = G_SREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v8i16
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_H:%[0-9]+]]:msa128h = LD_H [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_H1:%[0-9]+]]:msa128h = LD_H [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_S_H:%[0-9]+]]:msa128h = MOD_S_H [[LD_H]], [[LD_H1]]
+    ; P5600: ST_H [[MOD_S_H]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<8 x s16>) = G_SREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v4i32
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_W:%[0-9]+]]:msa128w = LD_W [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_W1:%[0-9]+]]:msa128w = LD_W [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_S_W:%[0-9]+]]:msa128w = MOD_S_W [[LD_W]], [[LD_W1]]
+    ; P5600: ST_W [[MOD_S_W]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<4 x s32>) = G_SREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v2i64
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_D:%[0-9]+]]:msa128d = LD_D [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_D1:%[0-9]+]]:msa128d = LD_D [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_S_D:%[0-9]+]]:msa128d = MOD_S_D [[LD_D]], [[LD_D1]]
+    ; P5600: ST_D [[MOD_S_D]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<2 x s64>) = G_SREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v16u8
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_B:%[0-9]+]]:msa128b = LD_B [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_B1:%[0-9]+]]:msa128b = LD_B [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_U_B:%[0-9]+]]:msa128b = DIV_U_B [[LD_B]], [[LD_B1]]
+    ; P5600: ST_B [[DIV_U_B]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<16 x s8>) = G_UDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v8u16
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_H:%[0-9]+]]:msa128h = LD_H [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_H1:%[0-9]+]]:msa128h = LD_H [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_U_H:%[0-9]+]]:msa128h = DIV_U_H [[LD_H]], [[LD_H1]]
+    ; P5600: ST_H [[DIV_U_H]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<8 x s16>) = G_UDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v4u32
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_W:%[0-9]+]]:msa128w = LD_W [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_W1:%[0-9]+]]:msa128w = LD_W [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_U_W:%[0-9]+]]:msa128w = DIV_U_W [[LD_W]], [[LD_W1]]
+    ; P5600: ST_W [[DIV_U_W]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<4 x s32>) = G_UDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v2u64
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_D:%[0-9]+]]:msa128d = LD_D [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_D1:%[0-9]+]]:msa128d = LD_D [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[DIV_U_D:%[0-9]+]]:msa128d = DIV_U_D [[LD_D]], [[LD_D1]]
+    ; P5600: ST_D [[DIV_U_D]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<2 x s64>) = G_UDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v16u8
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_B:%[0-9]+]]:msa128b = LD_B [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_B1:%[0-9]+]]:msa128b = LD_B [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_U_B:%[0-9]+]]:msa128b = MOD_U_B [[LD_B]], [[LD_B1]]
+    ; P5600: ST_B [[MOD_U_B]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<16 x s8>) = G_UREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v8u16
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_H:%[0-9]+]]:msa128h = LD_H [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_H1:%[0-9]+]]:msa128h = LD_H [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_U_H:%[0-9]+]]:msa128h = MOD_U_H [[LD_H]], [[LD_H1]]
+    ; P5600: ST_H [[MOD_U_H]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<8 x s16>) = G_UREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v4u32
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_W:%[0-9]+]]:msa128w = LD_W [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_W1:%[0-9]+]]:msa128w = LD_W [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_U_W:%[0-9]+]]:msa128w = MOD_U_W [[LD_W]], [[LD_W1]]
+    ; P5600: ST_W [[MOD_U_W]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<4 x s32>) = G_UREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v2u64
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gpr32 = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
+    ; P5600: [[LD_D:%[0-9]+]]:msa128d = LD_D [[COPY]], 0 :: (load 16 from %ir.a)
+    ; P5600: [[LD_D1:%[0-9]+]]:msa128d = LD_D [[COPY1]], 0 :: (load 16 from %ir.b)
+    ; P5600: [[MOD_U_D:%[0-9]+]]:msa128d = MOD_U_D [[LD_D]], [[LD_D1]]
+    ; P5600: ST_D [[MOD_U_D]], [[COPY2]], 0 :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(p0) = COPY $a1
+    %2:gprb(p0) = COPY $a2
+    %3:fprb(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:fprb(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:fprb(<2 x s64>) = G_UREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
new file mode 100644
index 000000000000..c66f567fc79f
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
@@ -0,0 +1,473 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
+--- |
+
+  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+...
+---
+name:            sdiv_v16i8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<16 x s8>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_SDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v8i16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<8 x s16>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_SDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v4i32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<4 x s32>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_SDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v2i64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<2 x s64>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_SDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v16i8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<16 x s8>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_SREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v8i16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<8 x s16>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_SREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v4i32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<4 x s32>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_SREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v2i64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<2 x s64>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_SREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v16u8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<16 x s8>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_UDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v8u16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<8 x s16>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_UDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v4u32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<4 x s32>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_UDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v2u64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<2 x s64>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_UDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v16u8
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<16 x s8>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_UREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v8u16
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<8 x s16>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_UREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v4u32
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<4 x s32>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_UREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v2u64
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<2 x s64>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_UREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
new file mode 100644
index 000000000000..402313976b4f
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
@@ -0,0 +1,501 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
+--- |
+
+  declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>)
+  define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+
+  declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>)
+  define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+
+  declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>)
+  define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+
+  declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>)
+  define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>)
+  define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+
+  declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>)
+  define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+
+  declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>)
+  define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+
+  declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>)
+  define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>)
+  define void @udiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+
+  declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>)
+  define void @udiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+
+  declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>)
+  define void @udiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+
+  declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>)
+  define void @udiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>)
+  define void @umod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+
+  declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>)
+  define void @umod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+
+  declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>)
+  define void @umod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+
+  declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>)
+  define void @umod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+...
+---
+name:            sdiv_v16i8_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v16i8_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<16 x s8>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.div.s.b), %3(<16 x s8>), %4(<16 x s8>)
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v8i16_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v8i16_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<8 x s16>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.div.s.h), %3(<8 x s16>), %4(<8 x s16>)
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v4i32_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v4i32_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<4 x s32>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.div.s.w), %3(<4 x s32>), %4(<4 x s32>)
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v2i64_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v2i64_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:_(<2 x s64>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.div.s.d), %3(<2 x s64>), %4(<2 x s64>)
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            smod_v16i8_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: smod_v16i8_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<16 x s8>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.mod.s.b), %3(<16 x s8>), %4(<16 x s8>)
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            smod_v8i16_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: smod_v8i16_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<8 x s16>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.mod.s.h), %3(<8 x s16>), %4(<8 x s16>)
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            smod_v4i32_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: smod_v4i32_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<4 x s32>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.mod.s.w), %3(<4 x s32>), %4(<4 x s32>)
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            smod_v2i64_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: smod_v2i64_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:_(<2 x s64>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.mod.s.d), %3(<2 x s64>), %4(<2 x s64>)
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v16i8_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v16i8_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<16 x s8>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.div.u.b), %3(<16 x s8>), %4(<16 x s8>)
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v8i16_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v8i16_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<8 x s16>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.div.u.h), %3(<8 x s16>), %4(<8 x s16>)
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v4i32_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v4i32_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<4 x s32>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.div.u.w), %3(<4 x s32>), %4(<4 x s32>)
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v2i64_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v2i64_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:_(<2 x s64>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.div.u.d), %3(<2 x s64>), %4(<2 x s64>)
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            umod_v16i8_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: umod_v16i8_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<16 x s8>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.mips.mod.u.b), %3(<16 x s8>), %4(<16 x s8>)
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            umod_v8i16_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: umod_v8i16_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<8 x s16>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.mips.mod.u.h), %3(<8 x s16>), %4(<8 x s16>)
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            umod_v4i32_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: umod_v4i32_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<4 x s32>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.mips.mod.u.w), %3(<4 x s32>), %4(<4 x s32>)
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            umod_v2i64_builtin
+alignment:       4
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: umod_v2i64_builtin
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:_(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:_(<2 x s64>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.mips.mod.u.d), %3(<2 x s64>), %4(<2 x s64>)
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
new file mode 100644
index 000000000000..29b41b454b5a
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
+
+define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: sdiv_v16i8:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    div_s.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %div = sdiv <16 x i8> %0, %1
+  store <16 x i8> %div, <16 x i8>* %c, align 16
+  ret void
+}
+
+define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: sdiv_v8i16:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    div_s.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %div = sdiv <8 x i16> %0, %1
+  store <8 x i16> %div, <8 x i16>* %c, align 16
+  ret void
+}
+
+define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: sdiv_v4i32:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    div_s.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %div = sdiv <4 x i32> %0, %1
+  store <4 x i32> %div, <4 x i32>* %c, align 16
+  ret void
+}
+
+define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: sdiv_v2i64:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    div_s.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %div = sdiv <2 x i64> %0, %1
+  store <2 x i64> %div, <2 x i64>* %c, align 16
+  ret void
+}
+
+define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: srem_v16i8:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    mod_s.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %rem = srem <16 x i8> %0, %1
+  store <16 x i8> %rem, <16 x i8>* %c, align 16
+  ret void
+}
+
+define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: srem_v8i16:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    mod_s.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %rem = srem <8 x i16> %0, %1
+  store <8 x i16> %rem, <8 x i16>* %c, align 16
+  ret void
+}
+
+define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: srem_v4i32:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    mod_s.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %rem = srem <4 x i32> %0, %1
+  store <4 x i32> %rem, <4 x i32>* %c, align 16
+  ret void
+}
+
+define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: srem_v2i64:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    mod_s.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %rem = srem <2 x i64> %0, %1
+  store <2 x i64> %rem, <2 x i64>* %c, align 16
+  ret void
+}
+
+define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: udiv_v16u8:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    div_u.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %div = udiv <16 x i8> %0, %1
+  store <16 x i8> %div, <16 x i8>* %c, align 16
+  ret void
+}
+
+define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: udiv_v8u16:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    div_u.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %div = udiv <8 x i16> %0, %1
+  store <8 x i16> %div, <8 x i16>* %c, align 16
+  ret void
+}
+
+define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: udiv_v4u32:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    div_u.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %div = udiv <4 x i32> %0, %1
+  store <4 x i32> %div, <4 x i32>* %c, align 16
+  ret void
+}
+
+define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: udiv_v2u64:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    div_u.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %div = udiv <2 x i64> %0, %1
+  store <2 x i64> %div, <2 x i64>* %c, align 16
+  ret void
+}
+
+define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: urem_v16u8:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    mod_u.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %rem = urem <16 x i8> %0, %1
+  store <16 x i8> %rem, <16 x i8>* %c, align 16
+  ret void
+}
+
+define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: urem_v8u16:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    mod_u.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %rem = urem <8 x i16> %0, %1
+  store <8 x i16> %rem, <8 x i16>* %c, align 16
+  ret void
+}
+
+define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: urem_v4u32:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    mod_u.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %rem = urem <4 x i32> %0, %1
+  store <4 x i32> %rem, <4 x i32>* %c, align 16
+  ret void
+}
+
+define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: urem_v2u64:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    mod_u.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %rem = urem <2 x i64> %0, %1
+  store <2 x i64> %rem, <2 x i64>* %c, align 16
+  ret void
+}

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
new file mode 100644
index 000000000000..8246971fe759
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
+
+declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>)
+define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: sdiv_v16i8_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    div_s.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* %c, align 16
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>)
+define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: sdiv_v8i16_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    div_s.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* %c, align 16
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>)
+define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: sdiv_v4i32_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    div_s.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* %c, align 16
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>)
+define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: sdiv_v2i64_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    div_s.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* %c, align 16
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>)
+define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: smod_v16i8_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    mod_s.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* %c, align 16
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>)
+define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: smod_v8i16_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    mod_s.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* %c, align 16
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>)
+define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: smod_v4i32_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    mod_s.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* %c, align 16
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>)
+define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: smod_v2i64_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    mod_s.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* %c, align 16
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>)
+define void @udiv_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: udiv_v16u8_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    div_u.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* %c, align 16
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>)
+define void @udiv_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: udiv_v8u16_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    div_u.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* %c, align 16
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>)
+define void @udiv_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: udiv_v4u32_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    div_u.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* %c, align 16
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>)
+define void @udiv_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: udiv_v2u64_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    div_u.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* %c, align 16
+  ret void
+}
+
+declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>)
+define void @umod_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+; P5600-LABEL: umod_v16u8_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.b $w0, 0($4)
+; P5600-NEXT:    ld.b $w1, 0($5)
+; P5600-NEXT:    mod_u.b $w0, $w0, $w1
+; P5600-NEXT:    st.b $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
+  store <16 x i8> %2, <16 x i8>* %c, align 16
+  ret void
+}
+
+declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>)
+define void @umod_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+; P5600-LABEL: umod_v8u16_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.h $w0, 0($4)
+; P5600-NEXT:    ld.h $w1, 0($5)
+; P5600-NEXT:    mod_u.h $w0, $w0, $w1
+; P5600-NEXT:    st.h $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
+  store <8 x i16> %2, <8 x i16>* %c, align 16
+  ret void
+}
+
+declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>)
+define void @umod_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+; P5600-LABEL: umod_v4u32_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.w $w0, 0($4)
+; P5600-NEXT:    ld.w $w1, 0($5)
+; P5600-NEXT:    mod_u.w $w0, $w0, $w1
+; P5600-NEXT:    st.w $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
+  store <4 x i32> %2, <4 x i32>* %c, align 16
+  ret void
+}
+
+declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>)
+define void @umod_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+; P5600-LABEL: umod_v2u64_builtin:
+; P5600:       # %bb.0: # %entry
+; P5600-NEXT:    ld.d $w0, 0($4)
+; P5600-NEXT:    ld.d $w1, 0($5)
+; P5600-NEXT:    mod_u.d $w0, $w0, $w1
+; P5600-NEXT:    st.d $w0, 0($6)
+; P5600-NEXT:    jr $ra
+; P5600-NEXT:    nop
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
+  store <2 x i64> %2, <2 x i64>* %c, align 16
+  ret void
+}

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
new file mode 100644
index 000000000000..249204c3b1b2
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
@@ -0,0 +1,489 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
+--- |
+
+  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+
+...
+---
+name:            sdiv_v16i8
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:fprb(<16 x s8>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_SDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v8i16
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:fprb(<8 x s16>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_SDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v4i32
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:fprb(<4 x s32>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_SDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            sdiv_v2i64
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: sdiv_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SDIV:%[0-9]+]]:fprb(<2 x s64>) = G_SDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_SDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v16i8
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v16i8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:fprb(<16 x s8>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_SREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v8i16
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v8i16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:fprb(<8 x s16>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_SREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v4i32
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v4i32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:fprb(<4 x s32>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_SREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            srem_v2i64
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: srem_v2i64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[SREM:%[0-9]+]]:fprb(<2 x s64>) = G_SREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[SREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_SREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v16u8
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:fprb(<16 x s8>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_UDIV %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v8u16
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:fprb(<8 x s16>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_UDIV %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v4u32
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:fprb(<4 x s32>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_UDIV %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            udiv_v2u64
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: udiv_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UDIV:%[0-9]+]]:fprb(<2 x s64>) = G_UDIV [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UDIV]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_UDIV %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v16u8
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v16u8
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<16 x s8>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:fprb(<16 x s8>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<16 x s8>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<16 x s8>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<16 x s8>) = G_UREM %3, %4
+    G_STORE %5(<16 x s8>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v8u16
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v8u16
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<8 x s16>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:fprb(<8 x s16>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<8 x s16>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<8 x s16>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<8 x s16>) = G_UREM %3, %4
+    G_STORE %5(<8 x s16>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v4u32
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v4u32
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<4 x s32>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:fprb(<4 x s32>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<4 x s32>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<4 x s32>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<4 x s32>) = G_UREM %3, %4
+    G_STORE %5(<4 x s32>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...
+---
+name:            urem_v2u64
+alignment:       4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; P5600-LABEL: name: urem_v2u64
+    ; P5600: liveins: $a0, $a1, $a2
+    ; P5600: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; P5600: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; P5600: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; P5600: [[LOAD:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.a)
+    ; P5600: [[LOAD1:%[0-9]+]]:fprb(<2 x s64>) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.b)
+    ; P5600: [[UREM:%[0-9]+]]:fprb(<2 x s64>) = G_UREM [[LOAD]], [[LOAD1]]
+    ; P5600: G_STORE [[UREM]](<2 x s64>), [[COPY2]](p0) :: (store 16 into %ir.c)
+    ; P5600: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16 from %ir.a)
+    %4:_(<2 x s64>) = G_LOAD %1(p0) :: (load 16 from %ir.b)
+    %5:_(<2 x s64>) = G_UREM %3, %4
+    G_STORE %5(<2 x s64>), %2(p0) :: (store 16 into %ir.c)
+    RetRA
+
+...


        


More information about the llvm-commits mailing list