[llvm] r343485 - [mips] Generate test expectations using update_llc_test_checks. NFC

Simon Atanasyan via llvm-commits <llvm-commits@lists.llvm.org>
Mon Oct 1 07:43:07 PDT 2018


Author: atanasyan
Date: Mon Oct  1 07:43:07 2018
New Revision: 343485

URL: http://llvm.org/viewvc/llvm-project?rev=343485&view=rev
Log:
[mips] Generate test expectations using update_llc_test_checks. NFC

Generate test expectations using update_llc_test_checks and reduce the
number of check prefixes used in the tests.

Modified:
    llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll
    llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll
    llvm/trunk/test/CodeGen/Mips/llvm-ir/udiv.ll
    llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll

Modified: llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll?rev=343485&r1=343484&r2=343485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll Mon Oct  1 07:43:07 2018
@@ -1,192 +1,507 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,NOT-R2-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,NOT-R2-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R6,GP32
-
-; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,NOT-R2-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,NOT-R2-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,NOT-R2-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,R2-R5,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R6,64R6
-
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR3,MM32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR6,MM32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP32R6
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP64R6
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR3
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR6
 
 define signext i1 @sdiv_i1(i1 signext %a, i1 signext %b) {
+; GP32-LABEL: sdiv_i1:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    div $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    mflo $1
+; GP32-NEXT:    andi $1, $1, 1
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    negu $2, $1
+;
+; GP32R6-LABEL: sdiv_i1:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    div $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    andi $1, $1, 1
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    negu $2, $1
+;
+; GP64-LABEL: sdiv_i1:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    div $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    mflo $1
+; GP64-NEXT:    andi $1, $1, 1
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    negu $2, $1
+;
+; GP64R6-LABEL: sdiv_i1:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    div $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    andi $1, $1, 1
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    negu $2, $1
+;
+; MMR3-LABEL: sdiv_i1:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    andi16 $2, $2, 1
+; MMR3-NEXT:    li16 $3, 0
+; MMR3-NEXT:    subu16 $2, $3, $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: sdiv_i1:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    div $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    andi16 $2, $2, 1
+; MMR6-NEXT:    li16 $3, 0
+; MMR6-NEXT:    subu16 $2, $3, $2
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: sdiv_i1:
-
-  ; NOT-R6:       div     $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $[[T0:[0-9]+]]
-  ; FIXME: The andi/negu instructions are redundant since div is signed.
-  ; NOT-R6:       andi    $[[T0]], $[[T0]], 1
-  ; NOT-R6:       negu    $2, $[[T0]]
-
-  ; R6:           div     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; FIXME: The andi/negu instructions are redundant since div is signed.
-  ; R6:           andi    $[[T0]], $[[T0]], 1
-  ; R6:           negu    $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $[[T0:[0-9]+]]
-  ; MMR3:         andi16  $[[T0]], $[[T0]], 1
-  ; MMR3:         li16    $[[T1:[0-9]+]], 0
-  ; MMR3:         subu16  $2, $[[T1]], $[[T0]]
-
-  ; MMR6:         div     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         andi16  $[[T0]], $[[T0]], 1
-  ; MMR6:         li16    $[[T1:[0-9]+]], 0
-  ; MMR6:         subu16  $2, $[[T1]], $[[T0]]
-
   %r = sdiv i1 %a, %b
   ret i1 %r
 }
 
 define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
+; GP32R0R2-LABEL: sdiv_i8:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    div $zero, $4, $5
+; GP32R0R2-NEXT:    teq $5, $zero, 7
+; GP32R0R2-NEXT:    mflo $1
+; GP32R0R2-NEXT:    sll $1, $1, 24
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 24
+;
+; GP32R2R5-LABEL: sdiv_i8:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    div $zero, $4, $5
+; GP32R2R5-NEXT:    teq $5, $zero, 7
+; GP32R2R5-NEXT:    mflo $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seb $2, $1
+;
+; GP32R6-LABEL: sdiv_i8:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    div $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seb $2, $1
+;
+; GP64R0R1-LABEL: sdiv_i8:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    div $zero, $4, $5
+; GP64R0R1-NEXT:    teq $5, $zero, 7
+; GP64R0R1-NEXT:    mflo $1
+; GP64R0R1-NEXT:    sll $1, $1, 24
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 24
+;
+; GP64R2R5-LABEL: sdiv_i8:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    div $zero, $4, $5
+; GP64R2R5-NEXT:    teq $5, $zero, 7
+; GP64R2R5-NEXT:    mflo $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seb $2, $1
+;
+; GP64R6-LABEL: sdiv_i8:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    div $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seb $2, $1
+;
+; MMR3-LABEL: sdiv_i8:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seb $2, $1
+;
+; MMR6-LABEL: sdiv_i8:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    div $1, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    seb $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: sdiv_i8:
-
-  ; NOT-R2-R6:    div     $zero, $4, $5
-  ; NOT-R2-R6:    teq     $5, $zero, 7
-  ; NOT-R2-R6:    mflo    $[[T0:[0-9]+]]
-  ; FIXME: The sll/sra instructions are redundant since div is signed.
-  ; NOT-R2-R6:    sll     $[[T1:[0-9]+]], $[[T0]], 24
-  ; NOT-R2-R6:    sra     $2, $[[T1]], 24
-
-  ; R2-R5:        div     $zero, $4, $5
-  ; R2-R5:        teq     $5, $zero, 7
-  ; R2-R5:        mflo    $[[T0:[0-9]+]]
-  ; FIXME: This instruction is redundant.
-  ; R2-R5:        seb     $2, $[[T0]]
-
-  ; R6:           div     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; FIXME: This instruction is redundant.
-  ; R6:           seb     $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $[[T0:[0-9]+]]
-  ; MMR3:         seb     $2, $[[T0]]
-
-  ; MMR6:         div     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         seb     $2, $[[T0]]
-
   %r = sdiv i8 %a, %b
   ret i8 %r
 }
 
 define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
+; GP32R0R2-LABEL: sdiv_i16:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    div $zero, $4, $5
+; GP32R0R2-NEXT:    teq $5, $zero, 7
+; GP32R0R2-NEXT:    mflo $1
+; GP32R0R2-NEXT:    sll $1, $1, 16
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 16
+;
+; GP32R2R5-LABEL: sdiv_i16:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    div $zero, $4, $5
+; GP32R2R5-NEXT:    teq $5, $zero, 7
+; GP32R2R5-NEXT:    mflo $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seh $2, $1
+;
+; GP32R6-LABEL: sdiv_i16:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    div $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seh $2, $1
+;
+; GP64R0R1-LABEL: sdiv_i16:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    div $zero, $4, $5
+; GP64R0R1-NEXT:    teq $5, $zero, 7
+; GP64R0R1-NEXT:    mflo $1
+; GP64R0R1-NEXT:    sll $1, $1, 16
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 16
+;
+; GP64R2R5-LABEL: sdiv_i16:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    div $zero, $4, $5
+; GP64R2R5-NEXT:    teq $5, $zero, 7
+; GP64R2R5-NEXT:    mflo $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seh $2, $1
+;
+; GP64R6-LABEL: sdiv_i16:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    div $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seh $2, $1
+;
+; MMR3-LABEL: sdiv_i16:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seh $2, $1
+;
+; MMR6-LABEL: sdiv_i16:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    div $1, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    seh $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: sdiv_i16:
-
-  ; NOT-R2-R6:    div     $zero, $4, $5
-  ; NOT-R2-R6:    teq     $5, $zero, 7
-  ; NOT-R2-R6:    mflo    $[[T0:[0-9]+]]
-  ; FIXME: The sll/sra instructions are redundant since div is signed.
-  ; NOT-R2-R6:    sll     $[[T1:[0-9]+]], $[[T0]], 16
-  ; NOT-R2-R6:    sra     $2, $[[T1]], 16
-
-  ; R2-R5:        div     $zero, $4, $5
-  ; R2-R5:        teq     $5, $zero, 7
-  ; R2-R5:        mflo    $[[T0:[0-9]+]]
-  ; FIXME: This is instruction is redundant since div is signed.
-  ; R2-R5:        seh     $2, $[[T0]]
-
-  ; R6:           div     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; FIXME: This is instruction is redundant since div is signed.
-  ; R6:           seh     $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $[[T0:[0-9]+]]
-  ; MMR3:         seh     $2, $[[T0]]
-
-  ; MMR6:         div     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         seh     $2, $[[T0]]
-
   %r = sdiv i16 %a, %b
   ret i16 %r
 }
 
 define signext i32 @sdiv_i32(i32 signext %a, i32 signext %b) {
+; GP32-LABEL: sdiv_i32:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    div $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mflo $2
+;
+; GP32R6-LABEL: sdiv_i32:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    div $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: sdiv_i32:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    div $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: sdiv_i32:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    div $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: sdiv_i32:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: sdiv_i32:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    div $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: sdiv_i32:
-
-  ; NOT-R6:       div     $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $2
-
-  ; R6:           div     $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16 $2
-
-  ; MMR6:         div     $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = sdiv i32 %a, %b
   ret i32 %r
 }
 
 define signext i64 @sdiv_i64(i64 signext %a, i64 signext %b) {
+; GP32-LABEL: sdiv_i64:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -24
+; GP32-NEXT:    .cfi_def_cfa_offset 24
+; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $25, %call16(__divdi3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 24
+;
+; GP32R6-LABEL: sdiv_i64:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -24
+; GP32R6-NEXT:    .cfi_def_cfa_offset 24
+; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $25, %call16(__divdi3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 24
+;
+; GP64-LABEL: sdiv_i64:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    ddiv $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: sdiv_i64:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    ddiv $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: sdiv_i64:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -24
+; MMR3-NEXT:    .cfi_def_cfa_offset 24
+; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    addu $2, $2, $25
+; MMR3-NEXT:    lw $25, %call16(__divdi3)($2)
+; MMR3-NEXT:    move $gp, $2
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 24
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: sdiv_i64:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -24
+; MMR6-NEXT:    .cfi_def_cfa_offset 24
+; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    addu $2, $2, $25
+; MMR6-NEXT:    lw $25, %call16(__divdi3)($2)
+; MMR6-NEXT:    move $gp, $2
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 24
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: sdiv_i64:
-
-  ; GP32:         lw      $25, %call16(__divdi3)($gp)
-
-  ; GP64-NOT-R6:  ddiv    $zero, $4, $5
-  ; GP64-NOT-R6:  teq     $5, $zero, 7
-  ; GP64-NOT-R6:  mflo    $2
-
-  ; 64R6:         ddiv    $2, $4, $5
-  ; 64R6:         teq     $5, $zero, 7
-
-  ; MM32:         lw      $25, %call16(__divdi3)($2)
-
   %r = sdiv i64 %a, %b
   ret i64 %r
 }
 
 define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
+; GP32-LABEL: sdiv_i128:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -40
+; GP32-NEXT:    .cfi_def_cfa_offset 40
+; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $1, 60($sp)
+; GP32-NEXT:    lw $2, 64($sp)
+; GP32-NEXT:    lw $3, 68($sp)
+; GP32-NEXT:    sw $3, 28($sp)
+; GP32-NEXT:    sw $2, 24($sp)
+; GP32-NEXT:    sw $1, 20($sp)
+; GP32-NEXT:    lw $1, 56($sp)
+; GP32-NEXT:    sw $1, 16($sp)
+; GP32-NEXT:    lw $25, %call16(__divti3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 40
+;
+; GP32R6-LABEL: sdiv_i128:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -40
+; GP32R6-NEXT:    .cfi_def_cfa_offset 40
+; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $1, 60($sp)
+; GP32R6-NEXT:    lw $2, 64($sp)
+; GP32R6-NEXT:    lw $3, 68($sp)
+; GP32R6-NEXT:    sw $3, 28($sp)
+; GP32R6-NEXT:    sw $2, 24($sp)
+; GP32R6-NEXT:    sw $1, 20($sp)
+; GP32R6-NEXT:    lw $1, 56($sp)
+; GP32R6-NEXT:    sw $1, 16($sp)
+; GP32R6-NEXT:    lw $25, %call16(__divti3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 40
+;
+; GP64-LABEL: sdiv_i128:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    daddiu $sp, $sp, -16
+; GP64-NEXT:    .cfi_def_cfa_offset 16
+; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64-NEXT:    .cfi_offset 31, -8
+; GP64-NEXT:    .cfi_offset 28, -16
+; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(sdiv_i128)))
+; GP64-NEXT:    daddu $1, $1, $25
+; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(sdiv_i128)))
+; GP64-NEXT:    ld $25, %call16(__divti3)($gp)
+; GP64-NEXT:    jalr $25
+; GP64-NEXT:    nop
+; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    daddiu $sp, $sp, 16
+;
+; GP64R6-LABEL: sdiv_i128:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    daddiu $sp, $sp, -16
+; GP64R6-NEXT:    .cfi_def_cfa_offset 16
+; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    .cfi_offset 31, -8
+; GP64R6-NEXT:    .cfi_offset 28, -16
+; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(sdiv_i128)))
+; GP64R6-NEXT:    daddu $1, $1, $25
+; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(sdiv_i128)))
+; GP64R6-NEXT:    ld $25, %call16(__divti3)($gp)
+; GP64R6-NEXT:    jalrc $25
+; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    daddiu $sp, $sp, 16
+;
+; MMR3-LABEL: sdiv_i128:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -48
+; MMR3-NEXT:    .cfi_def_cfa_offset 48
+; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    swp $16, 36($sp)
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    .cfi_offset 17, -8
+; MMR3-NEXT:    .cfi_offset 16, -12
+; MMR3-NEXT:    addu $16, $2, $25
+; MMR3-NEXT:    move $1, $7
+; MMR3-NEXT:    lw $7, 68($sp)
+; MMR3-NEXT:    lw $17, 72($sp)
+; MMR3-NEXT:    lw $3, 76($sp)
+; MMR3-NEXT:    move $2, $sp
+; MMR3-NEXT:    sw16 $3, 28($2)
+; MMR3-NEXT:    sw16 $17, 24($2)
+; MMR3-NEXT:    sw16 $7, 20($2)
+; MMR3-NEXT:    lw $3, 64($sp)
+; MMR3-NEXT:    sw16 $3, 16($2)
+; MMR3-NEXT:    lw $25, %call16(__divti3)($16)
+; MMR3-NEXT:    move $7, $1
+; MMR3-NEXT:    move $gp, $16
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lwp $16, 36($sp)
+; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 48
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: sdiv_i128:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -48
+; MMR6-NEXT:    .cfi_def_cfa_offset 48
+; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    .cfi_offset 17, -8
+; MMR6-NEXT:    .cfi_offset 16, -12
+; MMR6-NEXT:    addu $16, $2, $25
+; MMR6-NEXT:    move $1, $7
+; MMR6-NEXT:    lw $7, 68($sp)
+; MMR6-NEXT:    lw $17, 72($sp)
+; MMR6-NEXT:    lw $3, 76($sp)
+; MMR6-NEXT:    move $2, $sp
+; MMR6-NEXT:    sw16 $3, 28($2)
+; MMR6-NEXT:    sw16 $17, 24($2)
+; MMR6-NEXT:    sw16 $7, 20($2)
+; MMR6-NEXT:    lw $3, 64($sp)
+; MMR6-NEXT:    sw16 $3, 16($2)
+; MMR6-NEXT:    lw $25, %call16(__divti3)($16)
+; MMR6-NEXT:    move $7, $1
+; MMR6-NEXT:    move $gp, $16
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 48
+; MMR6-NEXT:    jrc $ra
 entry:
-  ; ALL-LABEL: sdiv_i128:
-
-  ; GP32:         lw      $25, %call16(__divti3)($gp)
-
-  ; GP64-NOT-R6:  ld      $25, %call16(__divti3)($gp)
-  ; 64R6:         ld      $25, %call16(__divti3)($gp)
-
-  ; MM32:         lw      $25, %call16(__divti3)($16)
-
   %r = sdiv i128 %a, %b
   ret i128 %r
 }

Modified: llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll?rev=343485&r1=343484&r2=343485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll Mon Oct  1 07:43:07 2018
@@ -1,184 +1,507 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R6,R2-R6
-
-; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,64R6,R6,R2-R6
-
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR3,MM32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR6,MM32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP32R6
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP64R6
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR3
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR6
 
 define signext i1 @srem_i1(i1 signext %a, i1 signext %b) {
+; GP32-LABEL: srem_i1:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    div $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    mfhi $1
+; GP32-NEXT:    andi $1, $1, 1
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    negu $2, $1
+;
+; GP32R6-LABEL: srem_i1:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    mod $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    andi $1, $1, 1
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    negu $2, $1
+;
+; GP64-LABEL: srem_i1:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    div $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    mfhi $1
+; GP64-NEXT:    andi $1, $1, 1
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    negu $2, $1
+;
+; GP64R6-LABEL: srem_i1:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    mod $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    andi $1, $1, 1
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    negu $2, $1
+;
+; MMR3-LABEL: srem_i1:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mfhi16 $2
+; MMR3-NEXT:    andi16 $2, $2, 1
+; MMR3-NEXT:    li16 $3, 0
+; MMR3-NEXT:    subu16 $2, $3, $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: srem_i1:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    mod $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    andi16 $2, $2, 1
+; MMR6-NEXT:    li16 $3, 0
+; MMR6-NEXT:    subu16 $2, $3, $2
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i1:
-
-  ; NOT-R6:       div     $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mfhi    $[[T0:[0-9]+]]
-  ; NOT-R6:       andi    $[[T0]], $[[T0]], 1
-  ; NOT-R6:       negu    $2, $[[T0]]
-
-  ; R6:           mod     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; R6:           andi    $[[T0]], $[[T0]], 1
-  ; R6:           negu    $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mfhi16  $[[T0:[0-9]+]]
-  ; MMR3:         andi16  $[[T0]], $[[T0]], 1
-  ; MMR3:         li16    $[[T1:[0-9]+]], 0
-  ; MMR3:         subu16  $2, $[[T1]], $[[T0]]
-
-  ; MMR6:         mod     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         andi16  $[[T0]], $[[T0]], 1
-  ; MMR6:         li16    $[[T1:[0-9]+]], 0
-  ; MMR6:         subu16  $2, $[[T1]], $[[T0]]
-
   %r = srem i1 %a, %b
   ret i1 %r
 }
 
 define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
+; GP32R0R2-LABEL: srem_i8:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    div $zero, $4, $5
+; GP32R0R2-NEXT:    teq $5, $zero, 7
+; GP32R0R2-NEXT:    mfhi $1
+; GP32R0R2-NEXT:    sll $1, $1, 24
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 24
+;
+; GP32R2R5-LABEL: srem_i8:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    div $zero, $4, $5
+; GP32R2R5-NEXT:    teq $5, $zero, 7
+; GP32R2R5-NEXT:    mfhi $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seb $2, $1
+;
+; GP32R6-LABEL: srem_i8:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    mod $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seb $2, $1
+;
+; GP64R0R1-LABEL: srem_i8:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    div $zero, $4, $5
+; GP64R0R1-NEXT:    teq $5, $zero, 7
+; GP64R0R1-NEXT:    mfhi $1
+; GP64R0R1-NEXT:    sll $1, $1, 24
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 24
+;
+; GP64R2R5-LABEL: srem_i8:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    div $zero, $4, $5
+; GP64R2R5-NEXT:    teq $5, $zero, 7
+; GP64R2R5-NEXT:    mfhi $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seb $2, $1
+;
+; GP64R6-LABEL: srem_i8:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    mod $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seb $2, $1
+;
+; MMR3-LABEL: srem_i8:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mfhi16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seb $2, $1
+;
+; MMR6-LABEL: srem_i8:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    mod $1, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    seb $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i8:
-
-  ; NOT-R2-R6:    div     $zero, $4, $5
-  ; NOT-R2-R6:    teq     $5, $zero, 7
-  ; NOT-R2-R6:    mfhi    $[[T0:[0-9]+]]
-  ; NOT-R2-R6:    sll     $[[T1:[0-9]+]], $[[T0]], 24
-  ; NOT-R2-R6:    sra     $2, $[[T1]], 24
-
-  ; R2-R5:        div     $zero, $4, $5
-  ; R2-R5:        teq     $5, $zero, 7
-  ; R2-R5:        mfhi    $[[T0:[0-9]+]]
-  ; R2-R5:        seb     $2, $[[T0]]
-
-  ; R6:           mod     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; R6:           seb     $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mfhi16  $[[T0:[0-9]+]]
-  ; MMR3:         seb     $2, $[[T0]]
-
-  ; MMR6:         mod     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         seb     $2, $[[T0]]
-
   %r = srem i8 %a, %b
   ret i8 %r
 }
 
 define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
+; GP32R0R2-LABEL: srem_i16:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    div $zero, $4, $5
+; GP32R0R2-NEXT:    teq $5, $zero, 7
+; GP32R0R2-NEXT:    mfhi $1
+; GP32R0R2-NEXT:    sll $1, $1, 16
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 16
+;
+; GP32R2R5-LABEL: srem_i16:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    div $zero, $4, $5
+; GP32R2R5-NEXT:    teq $5, $zero, 7
+; GP32R2R5-NEXT:    mfhi $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seh $2, $1
+;
+; GP32R6-LABEL: srem_i16:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    mod $1, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seh $2, $1
+;
+; GP64R0R1-LABEL: srem_i16:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    div $zero, $4, $5
+; GP64R0R1-NEXT:    teq $5, $zero, 7
+; GP64R0R1-NEXT:    mfhi $1
+; GP64R0R1-NEXT:    sll $1, $1, 16
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 16
+;
+; GP64R2R5-LABEL: srem_i16:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    div $zero, $4, $5
+; GP64R2R5-NEXT:    teq $5, $zero, 7
+; GP64R2R5-NEXT:    mfhi $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seh $2, $1
+;
+; GP64R6-LABEL: srem_i16:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    mod $1, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seh $2, $1
+;
+; MMR3-LABEL: srem_i16:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mfhi16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seh $2, $1
+;
+; MMR6-LABEL: srem_i16:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    mod $1, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    seh $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i16:
-
-  ; NOT-R2-R6:    div     $zero, $4, $5
-  ; NOT-R2-R6:    teq     $5, $zero, 7
-  ; NOT-R2-R6:    mfhi    $[[T0:[0-9]+]]
-  ; NOT-R2-R6:    sll     $[[T1:[0-9]+]], $[[T0]], 16
-  ; NOT-R2-R6:    sra     $2, $[[T1]], 16
-
-  ; R2-R5:        div     $zero, $4, $5
-  ; R2-R5:        teq     $5, $zero, 7
-  ; R2-R5:        mfhi    $[[T0:[0-9]+]]
-  ; R2-R5:        seh     $2, $[[T0]]
-
-  ; R6:           mod     $[[T0:[0-9]+]], $4, $5
-  ; R6:           teq     $5, $zero, 7
-  ; R6:           seh     $2, $[[T0]]
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mfhi16  $[[T0:[0-9]+]]
-  ; MMR3:         seh     $2, $[[T0]]
-
-  ; MMR6:         mod     $[[T0:[0-9]+]], $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-  ; MMR6:         seh     $2, $[[T0]]
-
   %r = srem i16 %a, %b
   ret i16 %r
 }
 
 define signext i32 @srem_i32(i32 signext %a, i32 signext %b) {
+; GP32-LABEL: srem_i32:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    div $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mfhi $2
+;
+; GP32R6-LABEL: srem_i32:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    mod $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: srem_i32:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    div $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mfhi $2
+;
+; GP64R6-LABEL: srem_i32:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    mod $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: srem_i32:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    div $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mfhi16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: srem_i32:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    mod $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i32:
-
-  ; NOT-R6:       div     $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mfhi    $2
-
-  ; R6:           mod     $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         div     $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mfhi16  $2
-
-  ; MMR6:         mod     $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = srem i32 %a, %b
   ret i32 %r
 }
 
 define signext i64 @srem_i64(i64 signext %a, i64 signext %b) {
+; GP32-LABEL: srem_i64:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -24
+; GP32-NEXT:    .cfi_def_cfa_offset 24
+; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $25, %call16(__moddi3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 24
+;
+; GP32R6-LABEL: srem_i64:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -24
+; GP32R6-NEXT:    .cfi_def_cfa_offset 24
+; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $25, %call16(__moddi3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 24
+;
+; GP64-LABEL: srem_i64:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    ddiv $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mfhi $2
+;
+; GP64R6-LABEL: srem_i64:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    dmod $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: srem_i64:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -24
+; MMR3-NEXT:    .cfi_def_cfa_offset 24
+; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    addu $2, $2, $25
+; MMR3-NEXT:    lw $25, %call16(__moddi3)($2)
+; MMR3-NEXT:    move $gp, $2
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 24
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: srem_i64:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -24
+; MMR6-NEXT:    .cfi_def_cfa_offset 24
+; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    addu $2, $2, $25
+; MMR6-NEXT:    lw $25, %call16(__moddi3)($2)
+; MMR6-NEXT:    move $gp, $2
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 24
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i64:
-
-  ; GP32:         lw      $25, %call16(__moddi3)($gp)
-
-  ; GP64-NOT-R6:  ddiv    $zero, $4, $5
-  ; GP64-NOT-R6:  teq     $5, $zero, 7
-  ; GP64-NOT-R6:  mfhi    $2
-
-  ; 64R6:         dmod    $2, $4, $5
-  ; 64R6:         teq     $5, $zero, 7
-
-  ; MM32:         lw      $25, %call16(__moddi3)($2)
-
   %r = srem i64 %a, %b
   ret i64 %r
 }
 
 define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
+; GP32-LABEL: srem_i128:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -40
+; GP32-NEXT:    .cfi_def_cfa_offset 40
+; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $1, 60($sp)
+; GP32-NEXT:    lw $2, 64($sp)
+; GP32-NEXT:    lw $3, 68($sp)
+; GP32-NEXT:    sw $3, 28($sp)
+; GP32-NEXT:    sw $2, 24($sp)
+; GP32-NEXT:    sw $1, 20($sp)
+; GP32-NEXT:    lw $1, 56($sp)
+; GP32-NEXT:    sw $1, 16($sp)
+; GP32-NEXT:    lw $25, %call16(__modti3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 40
+;
+; GP32R6-LABEL: srem_i128:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -40
+; GP32R6-NEXT:    .cfi_def_cfa_offset 40
+; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $1, 60($sp)
+; GP32R6-NEXT:    lw $2, 64($sp)
+; GP32R6-NEXT:    lw $3, 68($sp)
+; GP32R6-NEXT:    sw $3, 28($sp)
+; GP32R6-NEXT:    sw $2, 24($sp)
+; GP32R6-NEXT:    sw $1, 20($sp)
+; GP32R6-NEXT:    lw $1, 56($sp)
+; GP32R6-NEXT:    sw $1, 16($sp)
+; GP32R6-NEXT:    lw $25, %call16(__modti3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 40
+;
+; GP64-LABEL: srem_i128:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    daddiu $sp, $sp, -16
+; GP64-NEXT:    .cfi_def_cfa_offset 16
+; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64-NEXT:    .cfi_offset 31, -8
+; GP64-NEXT:    .cfi_offset 28, -16
+; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(srem_i128)))
+; GP64-NEXT:    daddu $1, $1, $25
+; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(srem_i128)))
+; GP64-NEXT:    ld $25, %call16(__modti3)($gp)
+; GP64-NEXT:    jalr $25
+; GP64-NEXT:    nop
+; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    daddiu $sp, $sp, 16
+;
+; GP64R6-LABEL: srem_i128:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    daddiu $sp, $sp, -16
+; GP64R6-NEXT:    .cfi_def_cfa_offset 16
+; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    .cfi_offset 31, -8
+; GP64R6-NEXT:    .cfi_offset 28, -16
+; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(srem_i128)))
+; GP64R6-NEXT:    daddu $1, $1, $25
+; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(srem_i128)))
+; GP64R6-NEXT:    ld $25, %call16(__modti3)($gp)
+; GP64R6-NEXT:    jalrc $25
+; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    daddiu $sp, $sp, 16
+;
+; MMR3-LABEL: srem_i128:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -48
+; MMR3-NEXT:    .cfi_def_cfa_offset 48
+; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    swp $16, 36($sp)
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    .cfi_offset 17, -8
+; MMR3-NEXT:    .cfi_offset 16, -12
+; MMR3-NEXT:    addu $16, $2, $25
+; MMR3-NEXT:    move $1, $7
+; MMR3-NEXT:    lw $7, 68($sp)
+; MMR3-NEXT:    lw $17, 72($sp)
+; MMR3-NEXT:    lw $3, 76($sp)
+; MMR3-NEXT:    move $2, $sp
+; MMR3-NEXT:    sw16 $3, 28($2)
+; MMR3-NEXT:    sw16 $17, 24($2)
+; MMR3-NEXT:    sw16 $7, 20($2)
+; MMR3-NEXT:    lw $3, 64($sp)
+; MMR3-NEXT:    sw16 $3, 16($2)
+; MMR3-NEXT:    lw $25, %call16(__modti3)($16)
+; MMR3-NEXT:    move $7, $1
+; MMR3-NEXT:    move $gp, $16
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lwp $16, 36($sp)
+; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 48
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: srem_i128:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -48
+; MMR6-NEXT:    .cfi_def_cfa_offset 48
+; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    .cfi_offset 17, -8
+; MMR6-NEXT:    .cfi_offset 16, -12
+; MMR6-NEXT:    addu $16, $2, $25
+; MMR6-NEXT:    move $1, $7
+; MMR6-NEXT:    lw $7, 68($sp)
+; MMR6-NEXT:    lw $17, 72($sp)
+; MMR6-NEXT:    lw $3, 76($sp)
+; MMR6-NEXT:    move $2, $sp
+; MMR6-NEXT:    sw16 $3, 28($2)
+; MMR6-NEXT:    sw16 $17, 24($2)
+; MMR6-NEXT:    sw16 $7, 20($2)
+; MMR6-NEXT:    lw $3, 64($sp)
+; MMR6-NEXT:    sw16 $3, 16($2)
+; MMR6-NEXT:    lw $25, %call16(__modti3)($16)
+; MMR6-NEXT:    move $7, $1
+; MMR6-NEXT:    move $gp, $16
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 48
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: srem_i128:
-
-  ; GP32:         lw      $25, %call16(__modti3)($gp)
-
-  ; GP64-NOT-R6:  ld      $25, %call16(__modti3)($gp)
-  ; 64R6:         ld      $25, %call16(__modti3)($gp)
-
-  ; MM32:         lw      $25, %call16(__modti3)($16)
-
   %r = srem i128 %a, %b
   ret i128 %r
 }

Modified: llvm/trunk/test/CodeGen/Mips/llvm-ir/udiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/llvm-ir/udiv.ll?rev=343485&r1=343484&r2=343485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/udiv.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/udiv.ll Mon Oct  1 07:43:07 2018
@@ -1,154 +1,445 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R6,GP32
-
-; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R6,GP64-NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R6,64R6
-
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR3,MM32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR6,MM32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R1
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R1
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP32R6
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R2
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP64R6
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR3
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR6
 
 define zeroext i1 @udiv_i1(i1 zeroext %a, i1 zeroext %b) {
+; GP32-LABEL: udiv_i1:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    divu $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mflo $2
+;
+; GP32R6-LABEL: udiv_i1:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    divu $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: udiv_i1:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    divu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: udiv_i1:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    divu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: udiv_i1:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    divu $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i1:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    divu $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i1:
-
-  ; NOT-R6:       divu    $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $2
-
-  ; R6:           divu    $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         divu    $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $2
-
-  ; MMR6:         divu    $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = udiv i1 %a, %b
   ret i1 %r
 }
 
 define zeroext i8 @udiv_i8(i8 zeroext %a, i8 zeroext %b) {
+; GP32-LABEL: udiv_i8:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    divu $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mflo $2
+;
+; GP32R6-LABEL: udiv_i8:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    divu $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: udiv_i8:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    divu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: udiv_i8:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    divu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: udiv_i8:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    divu $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i8:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    divu $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i8:
-
-  ; NOT-R6:       divu    $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $2
-
-  ; R6:           divu    $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         divu    $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $2
-
-  ; MMR6:         divu    $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = udiv i8 %a, %b
   ret i8 %r
 }
 
 define zeroext i16 @udiv_i16(i16 zeroext %a, i16 zeroext %b) {
+; GP32-LABEL: udiv_i16:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    divu $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mflo $2
+;
+; GP32R6-LABEL: udiv_i16:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    divu $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: udiv_i16:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    divu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: udiv_i16:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    divu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: udiv_i16:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    divu $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i16:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    divu $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i16:
-
-  ; NOT-R6:       divu    $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $2
-
-  ; R6:           divu    $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         divu    $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $2
-
-  ; MMR6:         divu    $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = udiv i16 %a, %b
   ret i16 %r
 }
 
 define signext i32 @udiv_i32(i32 signext %a, i32 signext %b) {
+; GP32-LABEL: udiv_i32:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    divu $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mflo $2
+;
+; GP32R6-LABEL: udiv_i32:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    divu $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: udiv_i32:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    divu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: udiv_i32:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    divu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: udiv_i32:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    divu $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mflo16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i32:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    divu $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i32:
-
-  ; NOT-R6:       divu    $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mflo    $2
-
-  ; R6:           divu    $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         divu    $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mflo16  $2
-
-  ; MMR6:         divu    $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = udiv i32 %a, %b
   ret i32 %r
 }
 
 define signext i64 @udiv_i64(i64 signext %a, i64 signext %b) {
+; GP32-LABEL: udiv_i64:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -24
+; GP32-NEXT:    .cfi_def_cfa_offset 24
+; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $25, %call16(__udivdi3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 24
+;
+; GP32R6-LABEL: udiv_i64:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -24
+; GP32R6-NEXT:    .cfi_def_cfa_offset 24
+; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $25, %call16(__udivdi3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 24
+;
+; GP64-LABEL: udiv_i64:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    ddivu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mflo $2
+;
+; GP64R6-LABEL: udiv_i64:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    ddivu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: udiv_i64:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -24
+; MMR3-NEXT:    .cfi_def_cfa_offset 24
+; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    addu $2, $2, $25
+; MMR3-NEXT:    lw $25, %call16(__udivdi3)($2)
+; MMR3-NEXT:    move $gp, $2
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 24
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i64:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -24
+; MMR6-NEXT:    .cfi_def_cfa_offset 24
+; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    addu $2, $2, $25
+; MMR6-NEXT:    lw $25, %call16(__udivdi3)($2)
+; MMR6-NEXT:    move $gp, $2
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 24
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i64:
-
-  ; GP32:         lw      $25, %call16(__udivdi3)($gp)
-
-  ; GP64-NOT-R6:  ddivu   $zero, $4, $5
-  ; GP64-NOT-R6:  teq     $5, $zero, 7
-  ; GP64-NOT-R6:  mflo    $2
-
-  ; 64R6:         ddivu   $2, $4, $5
-  ; 64R6:         teq     $5, $zero, 7
-
-  ; MM32:         lw      $25, %call16(__udivdi3)($2)
-
   %r = udiv i64 %a, %b
   ret i64 %r
 }
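; Worth spelling out why the 32-bit configurations above emit a call:
; MIPS32 has no 64-bit divide, so udiv i64 is legalized to a call to
; the runtime routine __udivdi3 (provided by compiler-rt or libgcc),
; loaded through the PIC call sequence checked above, while the 64-bit
; targets use ddivu directly. A sketch of the IR boundary (the function
; name is hypothetical):

define i64 @udiv64_sketch(i64 %a, i64 %b) {
  %q = udiv i64 %a, %b    ; GP32/MM: lw $25, %call16(__udivdi3)(...); jalr
  ret i64 %q              ; GP64:    ddivu $zero, $4, $5; mflo $2
}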
 
 define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
+; GP32-LABEL: udiv_i128:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -40
+; GP32-NEXT:    .cfi_def_cfa_offset 40
+; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $1, 60($sp)
+; GP32-NEXT:    lw $2, 64($sp)
+; GP32-NEXT:    lw $3, 68($sp)
+; GP32-NEXT:    sw $3, 28($sp)
+; GP32-NEXT:    sw $2, 24($sp)
+; GP32-NEXT:    sw $1, 20($sp)
+; GP32-NEXT:    lw $1, 56($sp)
+; GP32-NEXT:    sw $1, 16($sp)
+; GP32-NEXT:    lw $25, %call16(__udivti3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 40
+;
+; GP32R6-LABEL: udiv_i128:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -40
+; GP32R6-NEXT:    .cfi_def_cfa_offset 40
+; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $1, 60($sp)
+; GP32R6-NEXT:    lw $2, 64($sp)
+; GP32R6-NEXT:    lw $3, 68($sp)
+; GP32R6-NEXT:    sw $3, 28($sp)
+; GP32R6-NEXT:    sw $2, 24($sp)
+; GP32R6-NEXT:    sw $1, 20($sp)
+; GP32R6-NEXT:    lw $1, 56($sp)
+; GP32R6-NEXT:    sw $1, 16($sp)
+; GP32R6-NEXT:    lw $25, %call16(__udivti3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 40
+;
+; GP64-LABEL: udiv_i128:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    daddiu $sp, $sp, -16
+; GP64-NEXT:    .cfi_def_cfa_offset 16
+; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64-NEXT:    .cfi_offset 31, -8
+; GP64-NEXT:    .cfi_offset 28, -16
+; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(udiv_i128)))
+; GP64-NEXT:    daddu $1, $1, $25
+; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(udiv_i128)))
+; GP64-NEXT:    ld $25, %call16(__udivti3)($gp)
+; GP64-NEXT:    jalr $25
+; GP64-NEXT:    nop
+; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    daddiu $sp, $sp, 16
+;
+; GP64R6-LABEL: udiv_i128:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    daddiu $sp, $sp, -16
+; GP64R6-NEXT:    .cfi_def_cfa_offset 16
+; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    .cfi_offset 31, -8
+; GP64R6-NEXT:    .cfi_offset 28, -16
+; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(udiv_i128)))
+; GP64R6-NEXT:    daddu $1, $1, $25
+; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(udiv_i128)))
+; GP64R6-NEXT:    ld $25, %call16(__udivti3)($gp)
+; GP64R6-NEXT:    jalrc $25
+; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    daddiu $sp, $sp, 16
+;
+; MMR3-LABEL: udiv_i128:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -48
+; MMR3-NEXT:    .cfi_def_cfa_offset 48
+; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    swp $16, 36($sp)
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    .cfi_offset 17, -8
+; MMR3-NEXT:    .cfi_offset 16, -12
+; MMR3-NEXT:    addu $16, $2, $25
+; MMR3-NEXT:    move $1, $7
+; MMR3-NEXT:    lw $7, 68($sp)
+; MMR3-NEXT:    lw $17, 72($sp)
+; MMR3-NEXT:    lw $3, 76($sp)
+; MMR3-NEXT:    move $2, $sp
+; MMR3-NEXT:    sw16 $3, 28($2)
+; MMR3-NEXT:    sw16 $17, 24($2)
+; MMR3-NEXT:    sw16 $7, 20($2)
+; MMR3-NEXT:    lw $3, 64($sp)
+; MMR3-NEXT:    sw16 $3, 16($2)
+; MMR3-NEXT:    lw $25, %call16(__udivti3)($16)
+; MMR3-NEXT:    move $7, $1
+; MMR3-NEXT:    move $gp, $16
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lwp $16, 36($sp)
+; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 48
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: udiv_i128:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -48
+; MMR6-NEXT:    .cfi_def_cfa_offset 48
+; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    .cfi_offset 17, -8
+; MMR6-NEXT:    .cfi_offset 16, -12
+; MMR6-NEXT:    addu $16, $2, $25
+; MMR6-NEXT:    move $1, $7
+; MMR6-NEXT:    lw $7, 68($sp)
+; MMR6-NEXT:    lw $17, 72($sp)
+; MMR6-NEXT:    lw $3, 76($sp)
+; MMR6-NEXT:    move $2, $sp
+; MMR6-NEXT:    sw16 $3, 28($2)
+; MMR6-NEXT:    sw16 $17, 24($2)
+; MMR6-NEXT:    sw16 $7, 20($2)
+; MMR6-NEXT:    lw $3, 64($sp)
+; MMR6-NEXT:    sw16 $3, 16($2)
+; MMR6-NEXT:    lw $25, %call16(__udivti3)($16)
+; MMR6-NEXT:    move $7, $1
+; MMR6-NEXT:    move $gp, $16
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 48
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: udiv_i128:
-
-  ; GP32:         lw      $25, %call16(__udivti3)($gp)
-
-  ; GP64-NOT-R6:  ld      $25, %call16(__udivti3)($gp)
-  ; 64-R6:        ld      $25, %call16(__udivti3)($gp)
-
-  ; MM32:         lw      $25, %call16(__udivti3)($16)
-
   %r = udiv i128 %a, %b
   ret i128 %r
 }
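; The i128 variant follows the same pattern one size up: no MIPS target
; divides 128-bit values inline, so every configuration calls
; __udivti3. On the 32-bit ABIs only the first four words travel in
; $4-$7; the loads from 56..68($sp) paired with stores to 16..28($sp)
; above are the second i128 argument being copied from the caller's
; frame into the outgoing argument area. A sketch of the IR that
; produces the call (hypothetical function name):

define i128 @udiv128_sketch(i128 %a, i128 %b) {
  %q = udiv i128 %a, %b   ; all targets: load __udivti3 into $25, jalr/jalrc
  ret i128 %q
}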

Modified: llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll?rev=343485&r1=343484&r2=343485&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll Mon Oct  1 07:43:07 2018
@@ -1,210 +1,548 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R2-R5,R2-R6,NOT-R6
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP32,R6,R2-R6
-
-; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,GP64-NOT-R6,NOT-R6,NOT-R2-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R5,R2-R6,GP64-NOT-R6,NOT-R6
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,64R6,R6,R2-R6
-
-; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR3,MM32
-; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR6,MM32
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips -mcpu=mips2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R0R2
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP32,GP32R2R5
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP32R6
+
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips4 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R0R1
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r2 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r3 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r5 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefixes=GP64,GP64R2R5
+; RUN: llc < %s -mtriple=mips64 -mcpu=mips64r6 -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=GP64R6
+
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR3
+; RUN: llc < %s -mtriple=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic \
+; RUN:   | FileCheck %s -check-prefix=MMR6
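+
+; For anyone updating these expectations later: the NOTE line above
+; means the CHECK blocks are machine-generated, so rather than editing
+; them by hand one would rerun the generator, which reads the RUN lines
+; itself to pick the prefixes. A typical invocation, assuming a built
+; llc is on PATH and working from the llvm source tree, would be:
+;
+;   utils/update_llc_test_checks.py test/CodeGen/Mips/llvm-ir/urem.ll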
 
 define signext i1 @urem_i1(i1 signext %a, i1 signext %b) {
+; GP32-LABEL: urem_i1:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    andi $1, $5, 1
+; GP32-NEXT:    andi $2, $4, 1
+; GP32-NEXT:    divu $zero, $2, $1
+; GP32-NEXT:    teq $1, $zero, 7
+; GP32-NEXT:    mfhi $1
+; GP32-NEXT:    andi $1, $1, 1
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    negu $2, $1
+;
+; GP32R6-LABEL: urem_i1:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    andi $1, $5, 1
+; GP32R6-NEXT:    andi $2, $4, 1
+; GP32R6-NEXT:    modu $2, $2, $1
+; GP32R6-NEXT:    teq $1, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    negu $2, $2
+;
+; GP64-LABEL: urem_i1:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    andi $1, $5, 1
+; GP64-NEXT:    andi $2, $4, 1
+; GP64-NEXT:    divu $zero, $2, $1
+; GP64-NEXT:    teq $1, $zero, 7
+; GP64-NEXT:    mfhi $1
+; GP64-NEXT:    andi $1, $1, 1
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    negu $2, $1
+;
+; GP64R6-LABEL: urem_i1:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    andi $1, $5, 1
+; GP64R6-NEXT:    andi $2, $4, 1
+; GP64R6-NEXT:    modu $2, $2, $1
+; GP64R6-NEXT:    teq $1, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    negu $2, $2
+;
+; MMR3-LABEL: urem_i1:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    andi16 $2, $5, 1
+; MMR3-NEXT:    andi16 $3, $4, 1
+; MMR3-NEXT:    divu $zero, $3, $2
+; MMR3-NEXT:    teq $2, $zero, 7
+; MMR3-NEXT:    mfhi16 $2
+; MMR3-NEXT:    andi16 $2, $2, 1
+; MMR3-NEXT:    li16 $3, 0
+; MMR3-NEXT:    subu16 $2, $3, $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: urem_i1:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    andi16 $2, $5, 1
+; MMR6-NEXT:    andi16 $3, $4, 1
+; MMR6-NEXT:    modu $3, $3, $2
+; MMR6-NEXT:    teq $2, $zero, 7
+; MMR6-NEXT:    li16 $2, 0
+; MMR6-NEXT:    subu16 $2, $2, $3
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: urem_i1:
-
-  ; NOT-R6:       andi    $[[T0:[0-9]+]], $5, 1
-  ; NOT-R6:       andi    $[[T1:[0-9]+]], $4, 1
-  ; NOT-R6:       divu    $zero, $[[T1]], $[[T0]]
-  ; NOT-R6:       teq     $[[T0]], $zero, 7
-  ; NOT-R6:       mfhi    $[[T2:[0-9]+]]
-  ; NOT-R6:       andi    $[[T0]], $[[T0]], 1
-  ; NOT-R6:       negu    $2, $[[T0]]
-
-  ; R6:           andi    $[[T0:[0-9]+]], $5, 1
-  ; R6:           andi    $[[T1:[0-9]+]], $4, 1
-  ; R6:           modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; R6:           teq     $[[T0]], $zero, 7
-  ; R6:           negu    $2, $[[T2]]
-
-  ; MMR3:         andi16  $[[T0:[0-9]+]], $5, 1
-  ; MMR3:         andi16  $[[T1:[0-9]+]], $4, 1
-  ; MMR3:         divu    $zero, $[[T1]], $[[T0]]
-  ; MMR3:         teq     $[[T0]], $zero, 7
-  ; MMR3:         mfhi16  $[[T2:[0-9]+]]
-  ; MMR3:         andi16  $[[T0]], $[[T0]], 1
-  ; MMR3:         li16    $[[T1:[0-9]+]], 0
-  ; MMR3:         subu16  $2, $[[T1]], $[[T0]]
-
-  ; MMR6:         andi16  $[[T0:[0-9]+]], $5, 1
-  ; MMR6:         andi16  $[[T1:[0-9]+]], $4, 1
-  ; MMR6:         modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; MMR6:         teq     $[[T0]], $zero, 7
-  ; MMR6:         li16    $[[T3:[0-9]+]], 0
-  ; MMR6:         subu16  $2, $[[T3]], $[[T2]]
-
   %r = urem i1 %a, %b
   ret i1 %r
 }
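; The i1 case deserves a comment: with signext i1 values the only legal
; bit patterns are 0 and -1, so the lowering masks each input down to
; its low bit (andi/andi16), takes the unsigned remainder, and then
; sign-extends the one-bit result back to 0 or -1 by computing 0 - x,
; which is the trailing negu (or li16 + subu16 under microMIPS) above.
; In IR terms (hypothetical function name):

define signext i1 @urem1_sketch(i1 signext %a, i1 signext %b) {
  %r = urem i1 %a, %b     ; andi ..., 1; divu/modu; negu $2, <rem bit>
  ret i1 %r               ; returns 0 or -1 in $2
}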
 
 define signext i8 @urem_i8(i8 signext %a, i8 signext %b) {
+; GP32R0R2-LABEL: urem_i8:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    andi $1, $5, 255
+; GP32R0R2-NEXT:    andi $2, $4, 255
+; GP32R0R2-NEXT:    divu $zero, $2, $1
+; GP32R0R2-NEXT:    teq $1, $zero, 7
+; GP32R0R2-NEXT:    mfhi $1
+; GP32R0R2-NEXT:    sll $1, $1, 24
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 24
+;
+; GP32R2R5-LABEL: urem_i8:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    andi $1, $5, 255
+; GP32R2R5-NEXT:    andi $2, $4, 255
+; GP32R2R5-NEXT:    divu $zero, $2, $1
+; GP32R2R5-NEXT:    teq $1, $zero, 7
+; GP32R2R5-NEXT:    mfhi $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seb $2, $1
+;
+; GP32R6-LABEL: urem_i8:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    andi $1, $5, 255
+; GP32R6-NEXT:    andi $2, $4, 255
+; GP32R6-NEXT:    modu $2, $2, $1
+; GP32R6-NEXT:    teq $1, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seb $2, $2
+;
+; GP64R0R1-LABEL: urem_i8:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    andi $1, $5, 255
+; GP64R0R1-NEXT:    andi $2, $4, 255
+; GP64R0R1-NEXT:    divu $zero, $2, $1
+; GP64R0R1-NEXT:    teq $1, $zero, 7
+; GP64R0R1-NEXT:    mfhi $1
+; GP64R0R1-NEXT:    sll $1, $1, 24
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 24
+;
+; GP64R2R5-LABEL: urem_i8:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    andi $1, $5, 255
+; GP64R2R5-NEXT:    andi $2, $4, 255
+; GP64R2R5-NEXT:    divu $zero, $2, $1
+; GP64R2R5-NEXT:    teq $1, $zero, 7
+; GP64R2R5-NEXT:    mfhi $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seb $2, $1
+;
+; GP64R6-LABEL: urem_i8:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    andi $1, $5, 255
+; GP64R6-NEXT:    andi $2, $4, 255
+; GP64R6-NEXT:    modu $2, $2, $1
+; GP64R6-NEXT:    teq $1, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seb $2, $2
+;
+; MMR3-LABEL: urem_i8:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    andi16 $2, $5, 255
+; MMR3-NEXT:    andi16 $3, $4, 255
+; MMR3-NEXT:    divu $zero, $3, $2
+; MMR3-NEXT:    teq $2, $zero, 7
+; MMR3-NEXT:    mfhi16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seb $2, $1
+;
+; MMR6-LABEL: urem_i8:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    andi16 $2, $5, 255
+; MMR6-NEXT:    andi16 $3, $4, 255
+; MMR6-NEXT:    modu $1, $3, $2
+; MMR6-NEXT:    teq $2, $zero, 7
+; MMR6-NEXT:    seb $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: urem_i8:
-
-  ; NOT-R2-R6:    andi    $[[T0:[0-9]+]], $5, 255
-  ; NOT-R2-R6:    andi    $[[T1:[0-9]+]], $4, 255
-  ; NOT-R2-R6:    divu    $zero, $[[T1]], $[[T0]]
-  ; NOT-R2-R6:    teq     $[[T0]], $zero, 7
-  ; NOT-R2-R6:    mfhi    $[[T2:[0-9]+]]
-  ; NOT-R2-R6:    sll     $[[T3:[0-9]+]], $[[T2]], 24
-  ; NOT-R2-R6:    sra     $2, $[[T3]], 24
-
-  ; R2-R5:        andi    $[[T0:[0-9]+]], $5, 255
-  ; R2-R5:        andi    $[[T1:[0-9]+]], $4, 255
-  ; R2-R5:        divu    $zero, $[[T1]], $[[T0]]
-  ; R2-R5:        teq     $[[T0]], $zero, 7
-  ; R2-R5:        mfhi    $[[T2:[0-9]+]]
-  ; R2-R5:        seb     $2, $[[T2]]
-
-  ; R6:           andi    $[[T0:[0-9]+]], $5, 255
-  ; R6:           andi    $[[T1:[0-9]+]], $4, 255
-  ; R6:           modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; R6:           teq     $[[T0]], $zero, 7
-  ; R6:           seb     $2, $[[T2]]
-
-  ; MMR3:         andi16  $[[T0:[0-9]+]], $5, 255
-  ; MMR3:         andi16  $[[T1:[0-9]+]], $4, 255
-  ; MMR3:         divu    $zero, $[[T1]], $[[T0]]
-  ; MMR3:         teq     $[[T0]], $zero, 7
-  ; MMR3:         mfhi16  $[[T2:[0-9]+]]
-  ; MMR3:         seb     $2, $[[T2]]
-
-  ; MMR6:         andi16  $[[T0:[0-9]+]], $5, 255
-  ; MMR6:         andi16  $[[T1:[0-9]+]], $4, 255
-  ; MMR6:         modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; MMR6:         teq     $[[T0]], $zero, 7
-  ; MMR6:         seb     $2, $[[T2]]
-
   %r = urem i8 %a, %b
   ret i8 %r
 }
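; The split between the R0R2/R0R1 and R2R5 prefixes above comes down to
; one instruction: MIPS32R2/MIPS64R2 added seb (and seh for halfwords),
; so the remainder can be sign-extended in a single step, where the
; older revisions need the classic sll/sra shift pair. A sketch of IR
; that triggers this final extension (hypothetical function name):

define signext i8 @urem8_sketch(i8 signext %a, i8 signext %b) {
  %r = urem i8 %a, %b     ; pre-R2: sll $1, $1, 24; sra $2, $1, 24
  ret i8 %r               ; R2+:    seb $2, $1
}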
 
 define signext i16 @urem_i16(i16 signext %a, i16 signext %b) {
+; GP32R0R2-LABEL: urem_i16:
+; GP32R0R2:       # %bb.0: # %entry
+; GP32R0R2-NEXT:    andi $1, $5, 65535
+; GP32R0R2-NEXT:    andi $2, $4, 65535
+; GP32R0R2-NEXT:    divu $zero, $2, $1
+; GP32R0R2-NEXT:    teq $1, $zero, 7
+; GP32R0R2-NEXT:    mfhi $1
+; GP32R0R2-NEXT:    sll $1, $1, 16
+; GP32R0R2-NEXT:    jr $ra
+; GP32R0R2-NEXT:    sra $2, $1, 16
+;
+; GP32R2R5-LABEL: urem_i16:
+; GP32R2R5:       # %bb.0: # %entry
+; GP32R2R5-NEXT:    andi $1, $5, 65535
+; GP32R2R5-NEXT:    andi $2, $4, 65535
+; GP32R2R5-NEXT:    divu $zero, $2, $1
+; GP32R2R5-NEXT:    teq $1, $zero, 7
+; GP32R2R5-NEXT:    mfhi $1
+; GP32R2R5-NEXT:    jr $ra
+; GP32R2R5-NEXT:    seh $2, $1
+;
+; GP32R6-LABEL: urem_i16:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    andi $1, $5, 65535
+; GP32R6-NEXT:    andi $2, $4, 65535
+; GP32R6-NEXT:    modu $2, $2, $1
+; GP32R6-NEXT:    teq $1, $zero, 7
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    seh $2, $2
+;
+; GP64R0R1-LABEL: urem_i16:
+; GP64R0R1:       # %bb.0: # %entry
+; GP64R0R1-NEXT:    andi $1, $5, 65535
+; GP64R0R1-NEXT:    andi $2, $4, 65535
+; GP64R0R1-NEXT:    divu $zero, $2, $1
+; GP64R0R1-NEXT:    teq $1, $zero, 7
+; GP64R0R1-NEXT:    mfhi $1
+; GP64R0R1-NEXT:    sll $1, $1, 16
+; GP64R0R1-NEXT:    jr $ra
+; GP64R0R1-NEXT:    sra $2, $1, 16
+;
+; GP64R2R5-LABEL: urem_i16:
+; GP64R2R5:       # %bb.0: # %entry
+; GP64R2R5-NEXT:    andi $1, $5, 65535
+; GP64R2R5-NEXT:    andi $2, $4, 65535
+; GP64R2R5-NEXT:    divu $zero, $2, $1
+; GP64R2R5-NEXT:    teq $1, $zero, 7
+; GP64R2R5-NEXT:    mfhi $1
+; GP64R2R5-NEXT:    jr $ra
+; GP64R2R5-NEXT:    seh $2, $1
+;
+; GP64R6-LABEL: urem_i16:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    andi $1, $5, 65535
+; GP64R6-NEXT:    andi $2, $4, 65535
+; GP64R6-NEXT:    modu $2, $2, $1
+; GP64R6-NEXT:    teq $1, $zero, 7
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    seh $2, $2
+;
+; MMR3-LABEL: urem_i16:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    andi16 $2, $5, 65535
+; MMR3-NEXT:    andi16 $3, $4, 65535
+; MMR3-NEXT:    divu $zero, $3, $2
+; MMR3-NEXT:    teq $2, $zero, 7
+; MMR3-NEXT:    mfhi16 $1
+; MMR3-NEXT:    jr $ra
+; MMR3-NEXT:    seh $2, $1
+;
+; MMR6-LABEL: urem_i16:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    andi16 $2, $5, 65535
+; MMR6-NEXT:    andi16 $3, $4, 65535
+; MMR6-NEXT:    modu $1, $3, $2
+; MMR6-NEXT:    teq $2, $zero, 7
+; MMR6-NEXT:    seh $2, $1
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: urem_i16:
-
-  ; NOT-R2-R6:    andi    $[[T0:[0-9]+]], $5, 65535
-  ; NOT-R2-R6:    andi    $[[T1:[0-9]+]], $4, 65535
-  ; NOT-R2-R6:    divu    $zero, $[[T1]], $[[T0]]
-  ; NOT-R2-R6:    teq     $[[T0]], $zero, 7
-  ; NOT-R2-R6:    mfhi    $[[T2:[0-9]+]]
-  ; NOT-R2-R6:    sll     $[[T3:[0-9]+]], $[[T2]], 16
-  ; NOT-R2-R6:    sra     $2, $[[T3]], 16
-
-  ; R2-R5:        andi    $[[T0:[0-9]+]], $5, 65535
-  ; R2-R5:        andi    $[[T1:[0-9]+]], $4, 65535
-  ; R2-R5:        divu    $zero, $[[T1]], $[[T0]]
-  ; R2-R5:        teq     $[[T0]], $zero, 7
-  ; R2-R5:        mfhi    $[[T3:[0-9]+]]
-  ; R2-R5:        seh     $2, $[[T2]]
-
-  ; R6:           andi    $[[T0:[0-9]+]], $5, 65535
-  ; R6:           andi    $[[T1:[0-9]+]], $4, 65535
-  ; R6:           modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; R6:           teq     $[[T0]], $zero, 7
-  ; R6:           seh     $2, $[[T2]]
-
-  ; MMR3:         andi16  $[[T0:[0-9]+]], $5, 65535
-  ; MMR3:         andi16  $[[T1:[0-9]+]], $4, 65535
-  ; MMR3:         divu    $zero, $[[T1]], $[[T0]]
-  ; MMR3:         teq     $[[T0]], $zero, 7
-  ; MMR3:         mfhi16  $[[T2:[0-9]+]]
-  ; MMR3:         seh     $2, $[[T2]]
-
-  ; MMR6:         andi16  $[[T0:[0-9]+]], $5, 65535
-  ; MMR6:         andi16  $[[T1:[0-9]+]], $4, 65535
-  ; MMR6:         modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-  ; MMR6:         teq     $[[T0]], $zero, 7
-  ; MMR6:         seh     $2, $[[T2]]
-
   %r = urem i16 %a, %b
   ret i16 %r
 }
 
 define signext i32 @urem_i32(i32 signext %a, i32 signext %b) {
+; GP32-LABEL: urem_i32:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    divu $zero, $4, $5
+; GP32-NEXT:    teq $5, $zero, 7
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    mfhi $2
+;
+; GP32R6-LABEL: urem_i32:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    modu $2, $4, $5
+; GP32R6-NEXT:    teq $5, $zero, 7
+; GP32R6-NEXT:    jrc $ra
+;
+; GP64-LABEL: urem_i32:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    divu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mfhi $2
+;
+; GP64R6-LABEL: urem_i32:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    modu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: urem_i32:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    divu $zero, $4, $5
+; MMR3-NEXT:    teq $5, $zero, 7
+; MMR3-NEXT:    mfhi16 $2
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: urem_i32:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    modu $2, $4, $5
+; MMR6-NEXT:    teq $5, $zero, 7
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: urem_i32:
-
-  ; NOT-R6:       divu    $zero, $4, $5
-  ; NOT-R6:       teq     $5, $zero, 7
-  ; NOT-R6:       mfhi    $2
-
-  ; R6:           modu    $2, $4, $5
-  ; R6:           teq     $5, $zero, 7
-
-  ; MMR3:         divu    $zero, $4, $5
-  ; MMR3:         teq     $5, $zero, 7
-  ; MMR3:         mfhi16  $2
-
-  ; MMR6:         modu    $2, $4, $5
-  ; MMR6:         teq     $5, $zero, 7
-
   %r = urem i32 %a, %b
   ret i32 %r
 }
 
 define signext i64 @urem_i64(i64 signext %a, i64 signext %b) {
+; GP32-LABEL: urem_i64:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -24
+; GP32-NEXT:    .cfi_def_cfa_offset 24
+; GP32-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $25, %call16(__umoddi3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 24
+;
+; GP32R6-LABEL: urem_i64:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -24
+; GP32R6-NEXT:    .cfi_def_cfa_offset 24
+; GP32R6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $25, %call16(__umoddi3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 24
+;
+; GP64-LABEL: urem_i64:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    ddivu $zero, $4, $5
+; GP64-NEXT:    teq $5, $zero, 7
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    mfhi $2
+;
+; GP64R6-LABEL: urem_i64:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    dmodu $2, $4, $5
+; GP64R6-NEXT:    teq $5, $zero, 7
+; GP64R6-NEXT:    jrc $ra
+;
+; MMR3-LABEL: urem_i64:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -24
+; MMR3-NEXT:    .cfi_def_cfa_offset 24
+; MMR3-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    addu $2, $2, $25
+; MMR3-NEXT:    lw $25, %call16(__umoddi3)($2)
+; MMR3-NEXT:    move $gp, $2
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 24
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: urem_i64:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -24
+; MMR6-NEXT:    .cfi_def_cfa_offset 24
+; MMR6-NEXT:    sw $ra, 20($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    addu $2, $2, $25
+; MMR6-NEXT:    lw $25, %call16(__umoddi3)($2)
+; MMR6-NEXT:    move $gp, $2
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $ra, 20($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 24
+; MMR6-NEXT:    jrc $ra
 entry:
-; ALL-LABEL: urem_i64:
-
-  ; GP32:         lw      $25, %call16(__umoddi3)($gp)
-
-  ; GP64-NOT-R6:  ddivu   $zero, $4, $5
-  ; GP64-NOT-R6:  teq     $5, $zero, 7
-  ; GP64-NOT-R6:  mfhi    $2
-
-  ; 64R6:         dmodu   $2, $4, $5
-  ; 64R6:         teq     $5, $zero, 7
-
-  ; MM32:         lw      $25, %call16(__umoddi3)($2)
-
   %r = urem i64 %a, %b
   ret i64 %r
 }
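; Mirroring udiv_i64: the 32-bit configurations call __umoddi3 from the
; runtime library, pre-R6 MIPS64 uses ddivu and reads the remainder
; from HI with mfhi, and MIPS64R6 has a dedicated dmodu that yields the
; remainder directly with no HI/LO round trip. The triggering IR
; (hypothetical function name):

define i64 @urem64_sketch(i64 %a, i64 %b) {
  %r = urem i64 %a, %b    ; GP64: ddivu + mfhi; GP64R6: dmodu $2, $4, $5
  ret i64 %r              ; GP32/MMR3/MMR6: call __umoddi3
}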
 
 define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
+; GP32-LABEL: urem_i128:
+; GP32:       # %bb.0: # %entry
+; GP32-NEXT:    lui $2, %hi(_gp_disp)
+; GP32-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32-NEXT:    addiu $sp, $sp, -40
+; GP32-NEXT:    .cfi_def_cfa_offset 40
+; GP32-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32-NEXT:    .cfi_offset 31, -4
+; GP32-NEXT:    addu $gp, $2, $25
+; GP32-NEXT:    lw $1, 60($sp)
+; GP32-NEXT:    lw $2, 64($sp)
+; GP32-NEXT:    lw $3, 68($sp)
+; GP32-NEXT:    sw $3, 28($sp)
+; GP32-NEXT:    sw $2, 24($sp)
+; GP32-NEXT:    sw $1, 20($sp)
+; GP32-NEXT:    lw $1, 56($sp)
+; GP32-NEXT:    sw $1, 16($sp)
+; GP32-NEXT:    lw $25, %call16(__umodti3)($gp)
+; GP32-NEXT:    jalr $25
+; GP32-NEXT:    nop
+; GP32-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32-NEXT:    jr $ra
+; GP32-NEXT:    addiu $sp, $sp, 40
+;
+; GP32R6-LABEL: urem_i128:
+; GP32R6:       # %bb.0: # %entry
+; GP32R6-NEXT:    lui $2, %hi(_gp_disp)
+; GP32R6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; GP32R6-NEXT:    addiu $sp, $sp, -40
+; GP32R6-NEXT:    .cfi_def_cfa_offset 40
+; GP32R6-NEXT:    sw $ra, 36($sp) # 4-byte Folded Spill
+; GP32R6-NEXT:    .cfi_offset 31, -4
+; GP32R6-NEXT:    addu $gp, $2, $25
+; GP32R6-NEXT:    lw $1, 60($sp)
+; GP32R6-NEXT:    lw $2, 64($sp)
+; GP32R6-NEXT:    lw $3, 68($sp)
+; GP32R6-NEXT:    sw $3, 28($sp)
+; GP32R6-NEXT:    sw $2, 24($sp)
+; GP32R6-NEXT:    sw $1, 20($sp)
+; GP32R6-NEXT:    lw $1, 56($sp)
+; GP32R6-NEXT:    sw $1, 16($sp)
+; GP32R6-NEXT:    lw $25, %call16(__umodti3)($gp)
+; GP32R6-NEXT:    jalrc $25
+; GP32R6-NEXT:    lw $ra, 36($sp) # 4-byte Folded Reload
+; GP32R6-NEXT:    jr $ra
+; GP32R6-NEXT:    addiu $sp, $sp, 40
+;
+; GP64-LABEL: urem_i128:
+; GP64:       # %bb.0: # %entry
+; GP64-NEXT:    daddiu $sp, $sp, -16
+; GP64-NEXT:    .cfi_def_cfa_offset 16
+; GP64-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64-NEXT:    .cfi_offset 31, -8
+; GP64-NEXT:    .cfi_offset 28, -16
+; GP64-NEXT:    lui $1, %hi(%neg(%gp_rel(urem_i128)))
+; GP64-NEXT:    daddu $1, $1, $25
+; GP64-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(urem_i128)))
+; GP64-NEXT:    ld $25, %call16(__umodti3)($gp)
+; GP64-NEXT:    jalr $25
+; GP64-NEXT:    nop
+; GP64-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64-NEXT:    jr $ra
+; GP64-NEXT:    daddiu $sp, $sp, 16
+;
+; GP64R6-LABEL: urem_i128:
+; GP64R6:       # %bb.0: # %entry
+; GP64R6-NEXT:    daddiu $sp, $sp, -16
+; GP64R6-NEXT:    .cfi_def_cfa_offset 16
+; GP64R6-NEXT:    sd $ra, 8($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    sd $gp, 0($sp) # 8-byte Folded Spill
+; GP64R6-NEXT:    .cfi_offset 31, -8
+; GP64R6-NEXT:    .cfi_offset 28, -16
+; GP64R6-NEXT:    lui $1, %hi(%neg(%gp_rel(urem_i128)))
+; GP64R6-NEXT:    daddu $1, $1, $25
+; GP64R6-NEXT:    daddiu $gp, $1, %lo(%neg(%gp_rel(urem_i128)))
+; GP64R6-NEXT:    ld $25, %call16(__umodti3)($gp)
+; GP64R6-NEXT:    jalrc $25
+; GP64R6-NEXT:    ld $gp, 0($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
+; GP64R6-NEXT:    jr $ra
+; GP64R6-NEXT:    daddiu $sp, $sp, 16
+;
+; MMR3-LABEL: urem_i128:
+; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    lui $2, %hi(_gp_disp)
+; MMR3-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR3-NEXT:    addiusp -48
+; MMR3-NEXT:    .cfi_def_cfa_offset 48
+; MMR3-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR3-NEXT:    swp $16, 36($sp)
+; MMR3-NEXT:    .cfi_offset 31, -4
+; MMR3-NEXT:    .cfi_offset 17, -8
+; MMR3-NEXT:    .cfi_offset 16, -12
+; MMR3-NEXT:    addu $16, $2, $25
+; MMR3-NEXT:    move $1, $7
+; MMR3-NEXT:    lw $7, 68($sp)
+; MMR3-NEXT:    lw $17, 72($sp)
+; MMR3-NEXT:    lw $3, 76($sp)
+; MMR3-NEXT:    move $2, $sp
+; MMR3-NEXT:    sw16 $3, 28($2)
+; MMR3-NEXT:    sw16 $17, 24($2)
+; MMR3-NEXT:    sw16 $7, 20($2)
+; MMR3-NEXT:    lw $3, 64($sp)
+; MMR3-NEXT:    sw16 $3, 16($2)
+; MMR3-NEXT:    lw $25, %call16(__umodti3)($16)
+; MMR3-NEXT:    move $7, $1
+; MMR3-NEXT:    move $gp, $16
+; MMR3-NEXT:    jalr $25
+; MMR3-NEXT:    nop
+; MMR3-NEXT:    lwp $16, 36($sp)
+; MMR3-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR3-NEXT:    addiusp 48
+; MMR3-NEXT:    jrc $ra
+;
+; MMR6-LABEL: urem_i128:
+; MMR6:       # %bb.0: # %entry
+; MMR6-NEXT:    lui $2, %hi(_gp_disp)
+; MMR6-NEXT:    addiu $2, $2, %lo(_gp_disp)
+; MMR6-NEXT:    addiu $sp, $sp, -48
+; MMR6-NEXT:    .cfi_def_cfa_offset 48
+; MMR6-NEXT:    sw $ra, 44($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $17, 40($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    sw $16, 36($sp) # 4-byte Folded Spill
+; MMR6-NEXT:    .cfi_offset 31, -4
+; MMR6-NEXT:    .cfi_offset 17, -8
+; MMR6-NEXT:    .cfi_offset 16, -12
+; MMR6-NEXT:    addu $16, $2, $25
+; MMR6-NEXT:    move $1, $7
+; MMR6-NEXT:    lw $7, 68($sp)
+; MMR6-NEXT:    lw $17, 72($sp)
+; MMR6-NEXT:    lw $3, 76($sp)
+; MMR6-NEXT:    move $2, $sp
+; MMR6-NEXT:    sw16 $3, 28($2)
+; MMR6-NEXT:    sw16 $17, 24($2)
+; MMR6-NEXT:    sw16 $7, 20($2)
+; MMR6-NEXT:    lw $3, 64($sp)
+; MMR6-NEXT:    sw16 $3, 16($2)
+; MMR6-NEXT:    lw $25, %call16(__umodti3)($16)
+; MMR6-NEXT:    move $7, $1
+; MMR6-NEXT:    move $gp, $16
+; MMR6-NEXT:    jalr $25
+; MMR6-NEXT:    lw $16, 36($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $17, 40($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    lw $ra, 44($sp) # 4-byte Folded Reload
+; MMR6-NEXT:    addiu $sp, $sp, 48
+; MMR6-NEXT:    jrc $ra
 entry:
-  ; ALL-LABEL: urem_i128:
-
-  ; GP32:         lw      $25, %call16(__umodti3)($gp)
-
-  ; GP64-NOT-R6:  ld      $25, %call16(__umodti3)($gp)
-  ; 64R6:         ld      $25, %call16(__umodti3)($gp)
-
-  ; MM32:         lw      $25, %call16(__umodti3)($16)
-
-    %r = urem i128 %a, %b
-    ret i128 %r
+  %r = urem i128 %a, %b
+  ret i128 %r
 }



