[llvm] [ARM] Add mayRaiseFPException to appropriate instructions and mark all instructions that read/write fpscr rounding bits as doing so (PR #160698)

Erik Enikeev via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 2 08:24:08 PDT 2025


https://github.com/Varnike updated https://github.com/llvm/llvm-project/pull/160698

>From 804240615dd1c695236104ed7fee0303bcf174d7 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Wed, 24 Sep 2025 14:11:20 +0300
Subject: [PATCH 1/3] [ARM] Add mayRaiseFPException to appropriate instructions
 and mark all instructions that read/write fpscr rounding bits as doing so

Added a new register, FPSCR_RM, to correctly model interactions with the
rounding-mode control bits of fpscr and to avoid performance degradation
in the normal (non-strict-FP) case.
---
 llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp   |   1 +
 llvm/lib/Target/ARM/ARMInstrVFP.td            |  90 ++--
 llvm/lib/Target/ARM/ARMRegisterInfo.td        |   8 +-
 .../arm-instruction-select-combos.mir         |  16 +-
 .../test/CodeGen/ARM/GlobalISel/select-fp.mir | 388 ++++++++++--------
 .../CodeGen/ARM/GlobalISel/select-pr35926.mir |   2 +-
 llvm/test/CodeGen/ARM/bf16_fast_math.ll       |  18 +-
 .../CodeGen/ARM/cmse-vlldm-no-reorder.mir     |   4 +-
 llvm/test/CodeGen/ARM/cortex-m7-wideops.mir   |  17 +-
 llvm/test/CodeGen/ARM/fp16-litpool-arm.mir    |   2 +-
 llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir  |   2 +-
 llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir   |   2 +-
 llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir   |   2 +-
 llvm/test/CodeGen/ARM/fp16_fast_math.ll       |  86 ++--
 llvm/test/CodeGen/ARM/ipra-reg-usage.ll       |   2 +-
 ...ched-prevent-erase-history-of-subunits.mir |   4 +-
 llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir    |   4 +-
 .../Thumb2/LowOverheadLoops/emptyblock.mir    |  68 +--
 .../Thumb2/LowOverheadLoops/it-block-mov.mir  |  16 +-
 .../lstp-insertion-position.mir               |  12 +-
 .../LowOverheadLoops/mov-after-dlstp.mir      |   8 +-
 .../CodeGen/Thumb2/pipeliner-inlineasm.mir    |  16 +-
 llvm/test/CodeGen/Thumb2/scavenge-lr.mir      |  16 +-
 .../test/CodeGen/Thumb2/swp-exitbranchdir.mir |  16 +-
 llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir   |  12 +-
 llvm/test/CodeGen/Thumb2/swp-fixedii.mir      |  16 +-
 llvm/test/CodeGen/Thumb2/swp-regpressure.mir  | 160 ++++----
 27 files changed, 528 insertions(+), 460 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index e94220af05a0d..e2404397cc8c2 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -232,6 +232,7 @@ getReservedRegs(const MachineFunction &MF) const {
   markSuperRegs(Reserved, ARM::SP);
   markSuperRegs(Reserved, ARM::PC);
   markSuperRegs(Reserved, ARM::FPSCR);
+  markSuperRegs(Reserved, ARM::FPSCR_RM);
   markSuperRegs(Reserved, ARM::APSR_NZCV);
   if (TFI->isFPReserved(MF))
     markSuperRegs(Reserved, STI.getFramePointerReg());
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index 31650e0137beb..bc51e99412422 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -338,7 +338,7 @@ def : MnemonicAlias<"vstm", "vstmia">;
 
 def VLLDM : AXSI4FR<"vlldm${p}\t$Rn, $regs", 0, 1>,
             Requires<[HasV8MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
 // T1: assembly does not contains the register list.
@@ -348,7 +348,7 @@ def : InstAlias<"vlldm${p}\t$Rn", (VLLDM GPRnopc:$Rn, pred:$p, 0)>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
             Requires<[HasV8_1MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
                                         D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
@@ -356,8 +356,8 @@ def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLSTM : AXSI4FR<"vlstm${p}\t$Rn, $regs", 0, 0>,
             Requires<[HasV8MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV];
-    let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM];
+    let Uses = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
 // T1: assembly does not contain the register list.
@@ -367,8 +367,8 @@ def : InstAlias<"vlstm${p}\t$Rn", (VLSTM GPRnopc:$Rn, pred:$p, 0)>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLSTM_T2 : AXSI4FR<"vlstm${p}\t$Rn, $regs", 1, 0>,
             Requires<[HasV8_1MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV];
-    let Uses = [VPR, FPSCR, FPSCR_NZCV, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM];
+    let Uses = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
                                         D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
@@ -435,14 +435,14 @@ def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
 // FP Binary Operations.
 //
 
-let TwoOperandAliasConstraint = "$Dn = $Dd" in
+let TwoOperandAliasConstraint = "$Dn = $Dd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                   [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
              Sched<[WriteFPALU64]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                    (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                    IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
@@ -453,21 +453,21 @@ def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
   let D = VFPNeonA8Domain;
 }
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VADDH  : AHbI<0b11100, 0b11, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                   [(set (f16 HPR:$Sd), (fadd (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
              Sched<[WriteFPALU32]>;
 
-let TwoOperandAliasConstraint = "$Dn = $Dd" in
+let TwoOperandAliasConstraint = "$Dn = $Dd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                   [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
              Sched<[WriteFPALU64]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                    (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                    IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
@@ -478,42 +478,42 @@ def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
   let D = VFPNeonA8Domain;
 }
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSUBH  : AHbI<0b11100, 0b11, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                   [(set (f16 HPR:$Sd), (fsub (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
             Sched<[WriteFPALU32]>;
 
-let TwoOperandAliasConstraint = "$Dn = $Dd" in
+let TwoOperandAliasConstraint = "$Dn = $Dd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                   [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
              Sched<[WriteFPDIV64]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
              Sched<[WriteFPDIV32]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VDIVH  : AHbI<0b11101, 0b00, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                   [(set (f16 HPR:$Sd), (fdiv (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
              Sched<[WriteFPDIV32]>;
 
-let TwoOperandAliasConstraint = "$Dn = $Dd" in
+let TwoOperandAliasConstraint = "$Dn = $Dd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                   [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
              Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                    (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                    IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
@@ -524,21 +524,21 @@ def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
   let D = VFPNeonA8Domain;
 }
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMULH  : AHbI<0b11100, 0b10, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                   [(set (f16 HPR:$Sd), (fmul (f16 HPR:$Sn), (f16 HPR:$Sm)))]>,
              Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
 
-let TwoOperandAliasConstraint = "$Dn = $Dd" in
+let TwoOperandAliasConstraint = "$Dn = $Dd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                   [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
              Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
@@ -549,7 +549,7 @@ def VNMULS : ASbI<0b11100, 0b10, 1, 0,
   let D = VFPNeonA8Domain;
 }
 
-let TwoOperandAliasConstraint = "$Sn = $Sd" in
+let TwoOperandAliasConstraint = "$Sn = $Sd", mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
@@ -621,7 +621,7 @@ def : Pat<(fmul (fneg SPR:$a), SPR:$b),
           (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
 
 // These are encoded as unary instructions.
-let Defs = [FPSCR_NZCV] in {
+let Defs = [FPSCR_NZCV], mayRaiseFPException = 1, Uses = [FPSCR_RM] in {
 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                   (outs), (ins DPR:$Dd, DPR:$Dm),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm", "",
@@ -684,7 +684,7 @@ def VABSH  : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
                    IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
                    [(set (f16 HPR:$Sd), (fabs (f16 HPR:$Sm)))]>;
 
-let Defs = [FPSCR_NZCV] in {
+let Defs = [FPSCR_NZCV], mayRaiseFPException = 1, Uses = [FPSCR_RM] in {
 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                    (outs), (ins DPR:$Dd),
                    IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0", "",
@@ -742,6 +742,7 @@ def VCMPZH  : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
 }
 } // Defs = [FPSCR_NZCV]
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                    (outs DPR:$Dd), (ins SPR:$Sm),
                    IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm", "",
@@ -762,6 +763,7 @@ def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
 }
 
 // Special case encoding: bits 11-8 is 0b1011.
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                     IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm", "",
                     [(set SPR:$Sd, (fpround DPR:$Dm))]>,
@@ -787,7 +789,7 @@ def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
 }
 
 // Between half, single and double-precision.
-let hasSideEffects = 0 in
+let hasSideEffects = 0, mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
@@ -799,7 +801,7 @@ def : FP16Pat<(f32 (fpextend (f16 HPR:$Sm))),
 def : FP16Pat<(f16_to_fp GPR:$a),
               (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
 
-let hasSideEffects = 0 in
+let hasSideEffects = 0, mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
@@ -821,7 +823,7 @@ def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm
                                              SPR:$src2),
                                     (SSubReg_f16_reg imm:$lane)))>;
 
-let hasSideEffects = 0 in
+let hasSideEffects = 0, mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm", "",
                  [/* Intentionally left blank, see patterns below */]>,
@@ -835,7 +837,7 @@ def : FP16Pat<(f32 (fpextend (extractelt (v4f16 DPR:$src), imm_odd:$lane))),
                 (v2f32 (COPY_TO_REGCLASS (v4f16 DPR:$src), DPR_VFP2)),
                 (SSubReg_f16_reg imm_odd:$lane)))>;
 
-let hasSideEffects = 0 in
+let hasSideEffects = 0, mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sda, SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm", "$Sd = $Sda",
                  [/* Intentionally left blank, see patterns below */]>,
@@ -853,6 +855,7 @@ def : FP16Pat<(insertelt (v4f16 DPR:$src1), (f16 (fpround (f32 SPR:$src2))), imm
                                              SPR:$src2),
                                     (SSubReg_f16_reg imm:$lane)))>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                    (outs DPR:$Dd), (ins SPR:$Sm),
                    NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm", "",
@@ -876,6 +879,7 @@ def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
               (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
               Requires<[HasFPARMv8, HasDPVFP]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                    NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
@@ -901,6 +905,7 @@ def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
               (i32 (COPY_TO_REGCLASS (VCVTBDH (IMPLICIT_DEF), DPR:$a), GPR))>,
                    Requires<[HasFPARMv8, HasDPVFP]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                    (outs DPR:$Dd), (ins SPR:$Sm),
                    NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm", "",
@@ -915,6 +920,7 @@ def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
   let hasSideEffects = 0;
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sda, DPR:$Dm),
                    NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm", "$Sd = $Sda",
@@ -1140,18 +1146,21 @@ defm VRINTN : vrint_inst_anpm<"n", 0b01, froundeven>;
 defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
 defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm", "",
                   [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
              Sched<[WriteFPSQRT64]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm", "",
                   [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
              Sched<[WriteFPSQRT32]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
                   (outs HPR:$Sd), (ins HPR:$Sm),
                   IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
@@ -1757,7 +1766,7 @@ def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
              (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
 
 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
-let Uses = [FPSCR] in {
+let Uses = [FPSCR_RM] in {
 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                 (outs SPR:$Sd), (ins DPR:$Dm),
                                 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
@@ -2029,6 +2038,7 @@ def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
 } // End of 'let Constraints = "$a = $dst" in'
 
 // BFloat16  - Single precision, unary, predicated
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 class BF16_VCVT<string opc, bits<2> op7_6>
    : VFPAI<(outs SPR:$Sd), (ins SPR:$dst, SPR:$Sm),
            VFPUnaryFrm, NoItinerary,
@@ -2063,6 +2073,7 @@ def BF16_VCVTT : BF16_VCVT<"vcvtt", 0b11>;
 // FP Multiply-Accumulate Operations.
 //
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
@@ -2072,6 +2083,7 @@ def VMLAD : ADbI<0b11100, 0b00, 0, 0,
               Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
               Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
@@ -2085,6 +2097,7 @@ def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
   let D = VFPNeonA8Domain;
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLAH : AHbI<0b11100, 0b00, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
@@ -2104,6 +2117,7 @@ def : Pat<(fadd_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
           Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
 
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
@@ -2113,6 +2127,7 @@ def VMLSD : ADbI<0b11100, 0b00, 1, 0,
               Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
               Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
@@ -2126,6 +2141,7 @@ def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
   let D = VFPNeonA8Domain;
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VMLSH : AHbI<0b11100, 0b00, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
@@ -2144,6 +2160,7 @@ def : Pat<(fsub_mlx HPR:$dstin, (fmul_su (f16 HPR:$a), HPR:$b)),
           (VMLSH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
           Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                   (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                   IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
@@ -2153,6 +2170,7 @@ def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
                 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
@@ -2166,6 +2184,7 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
   let D = VFPNeonA8Domain;
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
@@ -2196,6 +2215,7 @@ def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su (f16 HPR:$a), HPR:$b)),
           (VNMLAH HPR:$dstin, (f16 HPR:$a), HPR:$b)>,
           Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                   (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                   IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
@@ -2205,6 +2225,7 @@ def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
                Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
@@ -2217,6 +2238,7 @@ def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
   let D = VFPNeonA8Domain;
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
@@ -2237,6 +2259,7 @@ def : Pat<(fsub_mlx (fmul_su (f16 HPR:$a), HPR:$b), HPR:$dstin),
 //===----------------------------------------------------------------------===//
 // Fused FP Multiply-Accumulate Operations.
 //
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
@@ -2246,6 +2269,7 @@ def VFMAD : ADbI<0b11101, 0b10, 0, 0,
               Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
             Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
@@ -2258,6 +2282,7 @@ def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
   // VFP pipelines.
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMAH : AHbI<0b11101, 0b10, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
@@ -2289,6 +2314,7 @@ def : Pat<(f16 (fma HPR:$Sn, HPR:$Sm, (f16 HPR:$Sdin))),
           (VFMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
       Requires<[HasFullFP16]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
@@ -2298,6 +2324,7 @@ def VFMSD : ADbI<0b11101, 0b10, 1, 0,
               Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
               Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
@@ -2310,6 +2337,7 @@ def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
   // VFP pipelines.
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFMSH : AHbI<0b11101, 0b10, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
@@ -2341,6 +2369,7 @@ def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (f16 HPR:$Sdin))),
           (VFMSH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
       Requires<[HasFullFP16]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
                   (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                   IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
@@ -2350,6 +2379,7 @@ def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
                 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
                 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
@@ -2362,6 +2392,7 @@ def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
   // VFP pipelines.
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
@@ -2400,6 +2431,7 @@ def : Pat<(f16 (fma (fneg (f16 HPR:$Sn)), (f16 HPR:$Sm), (fneg (f16 HPR:$Sdin)))
           (VFNMAH (f16 HPR:$Sdin), (f16 HPR:$Sn), (f16 HPR:$Sm))>,
       Requires<[HasFullFP16]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
                   (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                   IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
@@ -2409,6 +2441,7 @@ def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
                Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
                Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                   IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
@@ -2420,6 +2453,7 @@ def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
   // VFP pipelines.
 }
 
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in
 def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
                   (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                   IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
diff --git a/llvm/lib/Target/ARM/ARMRegisterInfo.td b/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 5a31b88ba7f70..de4219577c78c 100644
--- a/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -177,8 +177,9 @@ def Q15 : ARMReg<15, "q15", [D30, D31]>;
 }
 
 // Current Program Status Register.
-// We model fpscr with two registers: FPSCR models the control bits and will be
-// reserved. FPSCR_NZCV models the flag bits and will be unreserved. APSR_NZCV
+// We model fpscr with three registers: FPSCR models the control bits and will be
+// reserved. FPSCR_RM models the rounding-mode control bits and will be reserved.
+// FPSCR_NZCV models the flag bits and will be unreserved. APSR_NZCV
 // models the APSR when it's accessed by some special instructions. In such cases
 // it has the same encoding as PC.
 def CPSR       : ARMReg<0,  "cpsr">;
@@ -189,6 +190,9 @@ def FPSCR      : ARMReg<3,  "fpscr">;
 def FPSCR_NZCV : ARMReg<3,  "fpscr_nzcv"> {
   let Aliases = [FPSCR];
 }
+def FPSCR_RM : ARMReg<3,  "fpscr_rm"> {
+  let Aliases = [FPSCR];
+}
 def ITSTATE    : ARMReg<4, "itstate">;
 
 // Special Registers - only available in privileged mode.
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
index 77eeb34ef18cf..4dd8af01f873d 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
@@ -447,7 +447,7 @@ body:             |
     ; CHECK-LABEL: name: test_vnmuls
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK: [[VNMULS:%[0-9]+]]:spr = VNMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VNMULS:%[0-9]+]]:spr = nofpexcept VNMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $s0 = COPY [[VNMULS]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -477,7 +477,7 @@ body:             |
     ; CHECK-LABEL: name: test_vnmuls_reassociate
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK: [[VNMULS:%[0-9]+]]:spr = VNMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VNMULS:%[0-9]+]]:spr = nofpexcept VNMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $s0 = COPY [[VNMULS]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -507,7 +507,7 @@ body:             |
     ; CHECK-LABEL: name: test_vnmuld
     ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1
-    ; CHECK: [[VNMULD:%[0-9]+]]:dpr = VNMULD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VNMULD:%[0-9]+]]:dpr = nofpexcept VNMULD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $d0 = COPY [[VNMULD]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -539,7 +539,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1
     ; CHECK: [[COPY2:%[0-9]+]]:spr = COPY $s2
-    ; CHECK: [[VFNMAS:%[0-9]+]]:spr = VFNMAS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFNMAS:%[0-9]+]]:spr = nofpexcept VFNMAS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $s0 = COPY [[VFNMAS]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -573,7 +573,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1
     ; CHECK: [[COPY2:%[0-9]+]]:dpr = COPY $d2
-    ; CHECK: [[VFNMAD:%[0-9]+]]:dpr = VFNMAD [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFNMAD:%[0-9]+]]:dpr = nofpexcept VFNMAD [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $d0 = COPY [[VFNMAD]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -607,7 +607,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1
     ; CHECK: [[COPY2:%[0-9]+]]:spr = COPY $s2
-    ; CHECK: [[VFMSS:%[0-9]+]]:spr = VFMSS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFMSS:%[0-9]+]]:spr = nofpexcept VFMSS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $s0 = COPY [[VFMSS]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -640,7 +640,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1
     ; CHECK: [[COPY2:%[0-9]+]]:dpr = COPY $d2
-    ; CHECK: [[VFMSD:%[0-9]+]]:dpr = VFMSD [[COPY2]], [[COPY1]], [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFMSD:%[0-9]+]]:dpr = nofpexcept VFMSD [[COPY2]], [[COPY1]], [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $d0 = COPY [[VFMSD]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -673,7 +673,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:spr = COPY $s1
     ; CHECK: [[COPY2:%[0-9]+]]:spr = COPY $s2
-    ; CHECK: [[VFNMSS:%[0-9]+]]:spr = VFNMSS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFNMSS:%[0-9]+]]:spr = nofpexcept VFNMSS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $s0 = COPY [[VFNMSS]]
     ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir b/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
index ec834f1233ace..07a683842b59f 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
 # RUN: llc -O0 -mtriple arm-- -mattr=+vfp4,-neonfp -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 # RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2,+vfp4,-neonfp -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 --- |
@@ -76,11 +77,9 @@ body:             |
 ...
 ---
 name:            test_fadd_s32
-# CHECK-LABEL: name: test_fadd_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -89,28 +88,29 @@ body:             |
   bb.0:
     liveins: $s0, $s1
 
+    ; CHECK-LABEL: name: test_fadd_s32
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
+    ; CHECK-NEXT: [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VADDS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = COPY $s1
-    ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1
 
     %2(s32) = G_FADD %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VADDS [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %2(s32)
-    ; CHECK: $s0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fadd_s64
-# CHECK-LABEL: name: test_fadd_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -119,28 +119,29 @@ body:             |
   bb.0:
     liveins: $d0, $d1
 
+    ; CHECK-LABEL: name: test_fadd_s64
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
+    ; CHECK-NEXT: [[VADDD:%[0-9]+]]:dpr = nofpexcept VADDD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VADDD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = COPY $d1
-    ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1
 
     %2(s64) = G_FADD %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VADDD [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %2(s64)
-    ; CHECK: $d0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fsub_s32
-# CHECK-LABEL: name: test_fsub_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -149,28 +150,29 @@ body:             |
   bb.0:
     liveins: $s0, $s1
 
+    ; CHECK-LABEL: name: test_fsub_s32
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
+    ; CHECK-NEXT: [[VSUBS:%[0-9]+]]:spr = nofpexcept VSUBS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VSUBS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = COPY $s1
-    ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1
 
     %2(s32) = G_FSUB %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VSUBS [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %2(s32)
-    ; CHECK: $s0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fsub_s64
-# CHECK-LABEL: name: test_fsub_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -179,28 +181,29 @@ body:             |
   bb.0:
     liveins: $d0, $d1
 
+    ; CHECK-LABEL: name: test_fsub_s64
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
+    ; CHECK-NEXT: [[VSUBD:%[0-9]+]]:dpr = nofpexcept VSUBD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VSUBD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = COPY $d1
-    ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1
 
     %2(s64) = G_FSUB %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VSUBD [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %2(s64)
-    ; CHECK: $d0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fmul_s32
-# CHECK-LABEL: name: test_fmul_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -209,28 +212,29 @@ body:             |
   bb.0:
     liveins: $s0, $s1
 
+    ; CHECK-LABEL: name: test_fmul_s32
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
+    ; CHECK-NEXT: [[VMULS:%[0-9]+]]:spr = nofpexcept VMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VMULS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = COPY $s1
-    ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1
 
     %2(s32) = G_FMUL %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VMULS [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %2(s32)
-    ; CHECK: $s0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fmul_s64
-# CHECK-LABEL: name: test_fmul_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -239,28 +243,29 @@ body:             |
   bb.0:
     liveins: $d0, $d1
 
+    ; CHECK-LABEL: name: test_fmul_s64
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
+    ; CHECK-NEXT: [[VMULD:%[0-9]+]]:dpr = nofpexcept VMULD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VMULD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = COPY $d1
-    ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1
 
     %2(s64) = G_FMUL %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VMULD [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %2(s64)
-    ; CHECK: $d0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fdiv_s32
-# CHECK-LABEL: name: test_fdiv_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -269,28 +274,29 @@ body:             |
   bb.0:
     liveins: $s0, $s1
 
+    ; CHECK-LABEL: name: test_fdiv_s32
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
+    ; CHECK-NEXT: [[VDIVS:%[0-9]+]]:spr = nofpexcept VDIVS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VDIVS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = COPY $s1
-    ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1
 
     %2(s32) = G_FDIV %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VDIVS [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %2(s32)
-    ; CHECK: $s0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fdiv_s64
-# CHECK-LABEL: name: test_fdiv_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -299,28 +305,29 @@ body:             |
   bb.0:
     liveins: $d0, $d1
 
+    ; CHECK-LABEL: name: test_fdiv_s64
+    ; CHECK: liveins: $d0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
+    ; CHECK-NEXT: [[VDIVD:%[0-9]+]]:dpr = nofpexcept VDIVD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VDIVD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = COPY $d1
-    ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1
 
     %2(s64) = G_FDIV %0, %1
-    ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VDIVD [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %2(s64)
-    ; CHECK: $d0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fneg_s32
-# CHECK-LABEL: name: test_fneg_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -328,25 +335,26 @@ body:             |
   bb.0:
     liveins: $s0
 
+    ; CHECK-LABEL: name: test_fneg_s32
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[VNEGS:%[0-9]+]]:spr = VNEGS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $s0 = COPY [[VNEGS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = G_FNEG %0
-    ; CHECK: [[VREGSUM:%[0-9]+]]:spr = VNEGS [[VREGX]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %1(s32)
-    ; CHECK: $s0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fneg_s64
-# CHECK-LABEL: name: test_fneg_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -355,25 +363,26 @@ body:             |
   bb.0:
     liveins: $d0
 
+    ; CHECK-LABEL: name: test_fneg_s64
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[VNEGD:%[0-9]+]]:dpr = VNEGD [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $d0 = COPY [[VNEGD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = G_FNEG %0
-    ; CHECK: [[VREGSUM:%[0-9]+]]:dpr = VNEGD [[VREGX]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %1(s64)
-    ; CHECK: $d0 = COPY [[VREGSUM]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fma_s32
-# CHECK-LABEL: name: test_fma_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -383,31 +392,32 @@ body:             |
   bb.0:
     liveins: $s0, $s1, $s2
 
+    ; CHECK-LABEL: name: test_fma_s32
+    ; CHECK: liveins: $s0, $s1, $s2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:spr = COPY $s2
+    ; CHECK-NEXT: [[VFMAS:%[0-9]+]]:spr = nofpexcept VFMAS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VFMAS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = COPY $s1
-    ; CHECK: [[VREGY:%[0-9]+]]:spr = COPY $s1
 
     %2(s32) = COPY $s2
-    ; CHECK: [[VREGZ:%[0-9]+]]:spr = COPY $s2
 
     %3(s32) = G_FMA %0, %1, %2
-    ; CHECK: [[VREGR:%[0-9]+]]:spr = VFMAS [[VREGZ]], [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %3(s32)
-    ; CHECK: $s0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fma_s64
-# CHECK-LABEL: name: test_fma_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -417,31 +427,32 @@ body:             |
   bb.0:
     liveins: $d0, $d1, $d2
 
+    ; CHECK-LABEL: name: test_fma_s64
+    ; CHECK: liveins: $d0, $d1, $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:dpr = COPY $d2
+    ; CHECK-NEXT: [[VFMAD:%[0-9]+]]:dpr = nofpexcept VFMAD [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VFMAD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s64) = COPY $d1
-    ; CHECK: [[VREGY:%[0-9]+]]:dpr = COPY $d1
 
     %2(s64) = COPY $d2
-    ; CHECK: [[VREGZ:%[0-9]+]]:dpr = COPY $d2
 
     %3(s64) = G_FMA %0, %1, %2
-    ; CHECK: [[VREGR:%[0-9]+]]:dpr = VFMAD [[VREGZ]], [[VREGX]], [[VREGY]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %3(s64)
-    ; CHECK: $d0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fpext_s32_to_s64
-# CHECK-LABEL: name: test_fpext_s32_to_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -449,25 +460,26 @@ body:             |
   bb.0:
     liveins: $s0
 
+    ; CHECK-LABEL: name: test_fpext_s32_to_s64
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[VCVTDS:%[0-9]+]]:dpr = nofpexcept VCVTDS [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $d0 = COPY [[VCVTDS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s64) = G_FPEXT %0(s32)
-    ; CHECK: [[VREGR:%[0-9]+]]:dpr = VCVTDS [[VREGX]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %1(s64)
-    ; CHECK: $d0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_fptrunc_s64_to_s32
-# CHECK-LABEL: name: test_fptrunc_s64_to_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: fprb }
@@ -475,25 +487,26 @@ body:             |
   bb.0:
     liveins: $d0
 
+    ; CHECK-LABEL: name: test_fptrunc_s64_to_s32
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[VCVTSD:%[0-9]+]]:spr = nofpexcept VCVTSD [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: $s0 = COPY [[VCVTSD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s32) = G_FPTRUNC %0(s64)
-    ; CHECK: [[VREGR:%[0-9]+]]:spr = VCVTSD [[VREGX]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %1(s32)
-    ; CHECK: $s0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_fptosi_s32
-# CHECK-LABEL: name: test_fptosi_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: gprb }
@@ -501,26 +514,27 @@ body:             |
   bb.0:
     liveins: $s0
 
+    ; CHECK-LABEL: name: test_fptosi_s32
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[VTOSIZS:%[0-9]+]]:spr = VTOSIZS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOSIZS]]
+    ; CHECK-NEXT: $r0 = COPY [[COPY1]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = G_FPTOSI %0(s32)
-    ; CHECK: [[VREGI:%[0-9]+]]:spr = VTOSIZS [[VREGX]], 14 /* CC::al */, $noreg
-    ; CHECK: [[VREGR:%[0-9]+]]:gpr = COPY [[VREGI]]
 
     $r0 = COPY %1(s32)
-    ; CHECK: $r0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $r0
 ...
 ---
 name:            test_fptosi_s64
-# CHECK-LABEL: name: test_fptosi_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: gprb }
@@ -528,26 +542,27 @@ body:             |
   bb.0:
     liveins: $d0
 
+    ; CHECK-LABEL: name: test_fptosi_s64
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[VTOSIZD:%[0-9]+]]:spr = VTOSIZD [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOSIZD]]
+    ; CHECK-NEXT: $r0 = COPY [[COPY1]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s32) = G_FPTOSI %0(s64)
-    ; CHECK: [[VREGI:%[0-9]+]]:spr = VTOSIZD [[VREGX]], 14 /* CC::al */, $noreg
-    ; CHECK: [[VREGR:%[0-9]+]]:gpr = COPY [[VREGI]]
 
     $r0 = COPY %1(s32)
-    ; CHECK: $r0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $r0
 ...
 ---
 name:            test_fptoui_s32
-# CHECK-LABEL: name: test_fptoui_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: gprb }
@@ -555,26 +570,27 @@ body:             |
   bb.0:
     liveins: $s0
 
+    ; CHECK-LABEL: name: test_fptoui_s32
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[VTOUIZS:%[0-9]+]]:spr = VTOUIZS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOUIZS]]
+    ; CHECK-NEXT: $r0 = COPY [[COPY1]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
     %0(s32) = COPY $s0
-    ; CHECK: [[VREGX:%[0-9]+]]:spr = COPY $s0
 
     %1(s32) = G_FPTOUI %0(s32)
-    ; CHECK: [[VREGI:%[0-9]+]]:spr = VTOUIZS [[VREGX]], 14 /* CC::al */, $noreg
-    ; CHECK: [[VREGR:%[0-9]+]]:gpr = COPY [[VREGI]]
 
     $r0 = COPY %1(s32)
-    ; CHECK: $r0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $r0
 ...
 ---
 name:            test_fptoui_s64
-# CHECK-LABEL: name: test_fptoui_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: fprb }
   - { id: 1, class: gprb }
@@ -582,26 +598,27 @@ body:             |
   bb.0:
     liveins: $d0
 
+    ; CHECK-LABEL: name: test_fptoui_s64
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
+    ; CHECK-NEXT: [[VTOUIZD:%[0-9]+]]:spr = VTOUIZD [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOUIZD]]
+    ; CHECK-NEXT: $r0 = COPY [[COPY1]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
     %0(s64) = COPY $d0
-    ; CHECK: [[VREGX:%[0-9]+]]:dpr = COPY $d0
 
     %1(s32) = G_FPTOUI %0(s64)
-    ; CHECK: [[VREGI:%[0-9]+]]:spr = VTOUIZD [[VREGX]], 14 /* CC::al */, $noreg
-    ; CHECK: [[VREGR:%[0-9]+]]:gpr = COPY [[VREGI]]
 
     $r0 = COPY %1(s32)
-    ; CHECK: $r0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $r0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $r0
 ...
 ---
 name:            test_sitofp_s32
-# CHECK-LABEL: name: test_sitofp_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -609,26 +626,27 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_sitofp_s32
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY [[COPY]]
+    ; CHECK-NEXT: [[VSITOS:%[0-9]+]]:spr = VSITOS [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $s0 = COPY [[VSITOS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
 
     %1(s32) = G_SITOFP %0(s32)
-    ; CHECK: [[VREGF:%[0-9]+]]:spr = COPY [[VREGX]]
-    ; CHECK: [[VREGR:%[0-9]+]]:spr = VSITOS [[VREGF]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %1(s32)
-    ; CHECK: $s0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_sitofp_s64
-# CHECK-LABEL: name: test_sitofp_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -636,26 +654,27 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_sitofp_s64
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY [[COPY]]
+    ; CHECK-NEXT: [[VSITOD:%[0-9]+]]:dpr = VSITOD [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $d0 = COPY [[VSITOD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
 
     %1(s64) = G_SITOFP %0(s32)
-    ; CHECK: [[VREGF:%[0-9]+]]:spr = COPY [[VREGX]]
-    ; CHECK: [[VREGR:%[0-9]+]]:dpr = VSITOD [[VREGF]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %1(s64)
-    ; CHECK: $d0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_uitofp_s32
-# CHECK-LABEL: name: test_uitofp_s32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -663,26 +682,27 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_uitofp_s32
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY [[COPY]]
+    ; CHECK-NEXT: [[VUITOS:%[0-9]+]]:spr = VUITOS [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $s0 = COPY [[VUITOS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
 
     %1(s32) = G_UITOFP %0(s32)
-    ; CHECK: [[VREGF:%[0-9]+]]:spr = COPY [[VREGX]]
-    ; CHECK: [[VREGR:%[0-9]+]]:spr = VUITOS [[VREGF]], 14 /* CC::al */, $noreg
 
     $s0 = COPY %1(s32)
-    ; CHECK: $s0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_uitofp_s64
-# CHECK-LABEL: name: test_uitofp_s64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -690,26 +710,27 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_uitofp_s64
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY [[COPY]]
+    ; CHECK-NEXT: [[VUITOD:%[0-9]+]]:dpr = VUITOD [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $d0 = COPY [[VUITOD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s32) = COPY $r0
-    ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
 
     %1(s64) = G_UITOFP %0(s32)
-    ; CHECK: [[VREGF:%[0-9]+]]:spr = COPY [[VREGX]]
-    ; CHECK: [[VREGR:%[0-9]+]]:dpr = VUITOD [[VREGF]], 14 /* CC::al */, $noreg
 
     $d0 = COPY %1(s64)
-    ; CHECK: $d0 = COPY [[VREGR]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_load_f32
-# CHECK-LABEL: name: test_load_f32
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -717,25 +738,26 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_load_f32
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[VLDRS:%[0-9]+]]:spr = VLDRS [[COPY]], 0, 14 /* CC::al */, $noreg :: (load (s32))
+    ; CHECK-NEXT: $s0 = COPY [[VLDRS]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(p0) = COPY $r0
-    ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0
 
     %1(s32) = G_LOAD %0(p0) :: (load (s32))
-    ; CHECK: %[[V:[0-9]+]]:spr = VLDRS %[[P]], 0, 14 /* CC::al */, $noreg
 
     $s0 = COPY %1
-    ; CHECK: $s0 = COPY %[[V]]
 
     BX_RET 14, $noreg, implicit $s0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $s0
 ...
 ---
 name:            test_load_f64
-# CHECK-LABEL: name: test_load_f64
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
@@ -743,45 +765,50 @@ body:             |
   bb.0:
     liveins: $r0
 
+    ; CHECK-LABEL: name: test_load_f64
+    ; CHECK: liveins: $r0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[VLDRD:%[0-9]+]]:dpr = VLDRD [[COPY]], 0, 14 /* CC::al */, $noreg :: (load (s64))
+    ; CHECK-NEXT: $d0 = COPY [[VLDRD]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(p0) = COPY $r0
-    ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0
 
     %1(s64) = G_LOAD %0(p0) :: (load (s64))
-    ; CHECK: %[[V:[0-9]+]]:dpr = VLDRD %[[P]], 0, 14 /* CC::al */, $noreg
 
     $d0 = COPY %1
-    ; CHECK: $d0 = COPY %[[V]]
 
     BX_RET 14, $noreg, implicit $d0
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $d0
 ...
 ---
 name:            test_stores
-# CHECK-LABEL: name: test_stores
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: fprb }
   - { id: 2, class: fprb }
-# CHECK: id: [[P:[0-9]+]], class: gpr
-# CHECK: id: [[F32:[0-9]+]], class: spr
-# CHECK: id: [[F64:[0-9]+]], class: dpr
 body:             |
   bb.0:
     liveins: $r0, $s0, $d0
 
+    ; CHECK-LABEL: name: test_stores
+    ; CHECK: liveins: $r0, $s0, $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:dpr = COPY $d2
+    ; CHECK-NEXT: VSTRS [[COPY1]], [[COPY]], 0, 14 /* CC::al */, $noreg :: (store (s32))
+    ; CHECK-NEXT: VSTRD [[COPY2]], [[COPY]], 0, 14 /* CC::al */, $noreg :: (store (s64))
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg
     %0(p0) = COPY $r0
     %1(s32) = COPY $s0
     %2(s64) = COPY $d2
 
     G_STORE %1(s32), %0(p0) :: (store (s32))
-    ; CHECK: VSTRS %[[F32]], %[[P]], 0, 14 /* CC::al */, $noreg
 
     G_STORE %2(s64), %0(p0) :: (store (s64))
-    ; CHECK: VSTRD %[[F64]], %[[P]], 0, 14 /* CC::al */, $noreg
 
     BX_RET 14, $noreg
 ...
@@ -833,11 +860,9 @@ body:             |
 ...
 ---
 name:            test_soft_fp_double
-# CHECK-LABEL: name: test_soft_fp_double
 legalized:       true
 regBankSelected: true
 selected:        false
-# CHECK: selected: true
 registers:
   - { id: 0, class: gprb }
   - { id: 1, class: gprb }
@@ -848,24 +873,27 @@ body:             |
   bb.0:
     liveins: $r0, $r1, $r2, $r3
 
+    ; CHECK-LABEL: name: test_soft_fp_double
+    ; CHECK: liveins: $r0, $r1, $r2, $r3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r3
+    ; CHECK-NEXT: [[VMOVDRR:%[0-9]+]]:dpr = VMOVDRR [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[VMOVRRD:%[0-9]+]]:gpr, [[VMOVRRD1:%[0-9]+]]:gpr = VMOVRRD [[VMOVDRR]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: $r0 = COPY [[VMOVRRD]]
+    ; CHECK-NEXT: $r1 = COPY [[VMOVRRD1]]
+    ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0, implicit $r1
     %0(s32) = COPY $r2
-    ; CHECK: [[IN1:%[0-9]+]]:gpr = COPY $r2
 
     %1(s32) = COPY $r3
-    ; CHECK: [[IN2:%[0-9]+]]:gpr = COPY $r3
 
     %2(s64) = G_MERGE_VALUES %0(s32), %1(s32)
-    ; CHECK: %[[DREG:[0-9]+]]:dpr = VMOVDRR [[IN1]], [[IN2]]
 
     %3(s32), %4(s32) = G_UNMERGE_VALUES %2(s64)
-    ; CHECK: [[OUT1:%[0-9]+]]:gpr, [[OUT2:%[0-9]+]]:gpr = VMOVRRD %[[DREG]]
 
     $r0 = COPY %3
-    ; CHECK: $r0 = COPY [[OUT1]]
 
     $r1 = COPY %4
-    ; CHECK: $r1 = COPY [[OUT2]]
 
     BX_RET 14, $noreg, implicit $r0, implicit $r1
-    ; CHECK: BX_RET 14 /* CC::al */, $noreg, implicit $r0, implicit $r1
 ...
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
index a6fc4dad49fd2..fa982d8a60d75 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
@@ -31,7 +31,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK: [[COPY1:%[0-9]+]]:dpr = COPY $d1
     ; CHECK: [[COPY2:%[0-9]+]]:dpr = COPY $d2
-    ; CHECK: [[VFNMSD:%[0-9]+]]:dpr = VFNMSD [[COPY2]], [[COPY1]], [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VFNMSD:%[0-9]+]]:dpr = nofpexcept VFNMSD [[COPY2]], [[COPY1]], [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
     ; CHECK: $d0 = COPY [[VFNMSD]]
     ; CHECK: MOVPCLR 14 /* CC::al */, $noreg, implicit $d0
     %0:fprb(s64) = COPY $d0
diff --git a/llvm/test/CodeGen/ARM/bf16_fast_math.ll b/llvm/test/CodeGen/ARM/bf16_fast_math.ll
index 1b18ea6feb2ea..5f7e1e69d99d6 100644
--- a/llvm/test/CodeGen/ARM/bf16_fast_math.ll
+++ b/llvm/test/CodeGen/ARM/bf16_fast_math.ll
@@ -17,7 +17,7 @@ define bfloat @normal_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -44,7 +44,7 @@ define bfloat @fast_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -71,7 +71,7 @@ define bfloat @ninf_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY1]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -102,7 +102,7 @@ define bfloat @normal_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -113,7 +113,7 @@ define bfloat @normal_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi3:%[0-9]+]]:gpr = MOVsi [[COPY3]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR3:%[0-9]+]]:spr = VMOVSR killed [[MOVsi3]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = nofpexcept VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS1:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS1]]
@@ -142,10 +142,10 @@ define bfloat @nnan_ninf_contract_fadd_sequence(bfloat %x, bfloat %y, bfloat %z)
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf contract nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[MOVsi2:%[0-9]+]]:gpr = MOVsi [[COPY]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VADDS]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf contract nofpexcept VADDS killed [[VADDS]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -174,7 +174,7 @@ define bfloat @ninf_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR:%[0-9]+]]:spr = VMOVSR killed [[MOVsi]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi1:%[0-9]+]]:gpr = MOVsi [[COPY2]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR1:%[0-9]+]]:spr = VMOVSR killed [[MOVsi1]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VMOVSR1]], killed [[VMOVSR]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS:%[0-9]+]]:gpr = VMOVRS killed [[VADDS]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS]]
@@ -185,7 +185,7 @@ define bfloat @ninf_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[VMOVSR2:%[0-9]+]]:spr = VMOVSR killed [[MOVsi2]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   [[MOVsi3:%[0-9]+]]:gpr = MOVsi [[COPY3]], 130, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NOBF16-NEXT:   [[VMOVSR3:%[0-9]+]]:spr = VMOVSR killed [[MOVsi3]], 14 /* CC::al */, $noreg
-  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = ninf VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg
+  ; CHECK-NOBF16-NEXT:   [[VADDS1:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VMOVSR3]], killed [[VMOVSR2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-NOBF16-NEXT:   [[VMOVRS1:%[0-9]+]]:gpr = VMOVRS killed [[VADDS1]], 14 /* CC::al */, $noreg
   ; CHECK-NOBF16-NEXT:   ADJCALLSTACKDOWN 0, 0, 14 /* CC::al */, $noreg, implicit-def dead $sp, implicit $sp
   ; CHECK-NOBF16-NEXT:   $r0 = COPY [[VMOVRS1]]
diff --git a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
index 9bc335c2ab09a..674ed04616fa5 100644
--- a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
+++ b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
@@ -90,7 +90,7 @@ body:             |
 # CHECK-NEXT:  $r0 = t2BICri $r0, 1, 14 /* CC::al */, $noreg, $noreg
 # CHECK-NEXT:  $sp = tSUBspi $sp, 34, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  dead $s0 = VMOVS undef $s0, 14 /* CC::al */, $noreg
-# CHECK-NEXT:  VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
+# CHECK-NEXT:  VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $fpscr_rm, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
 # CHECK-NEXT:  $r1 = tMOVr $r0, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $r2 = tMOVr $r0, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
@@ -106,7 +106,7 @@ body:             |
 # CHECK-NEXT:  t2MSR_M 3072, $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
 # CHECK-NEXT:  tBLXNSr 14 /* CC::al */, $noreg, killed $r0, csr_aapcs, implicit-def $lr, implicit $sp, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $s0
 # CHECK-NEXT:  $r12 = VMOVRS $s0, 14 /* CC::al */, $noreg
-# CHECK-NEXT:  VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
+# CHECK-NEXT:  VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
 # CHECK-NEXT:  $s0 = VMOVSR $r12, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $sp = tADDspi $sp, 34, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11
diff --git a/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir b/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir
index 1bee32f4c90cd..fe23e8594c946 100644
--- a/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir
+++ b/llvm/test/CodeGen/ARM/cortex-m7-wideops.mir
@@ -22,15 +22,16 @@ body:             |
 
     ; CHECK-LABEL: name: test_groups
     ; CHECK: liveins: $d0, $r0, $r1, $r2, $r3, $r4
-    ; CHECK: renamable $d0 = VADDD killed renamable $d0, renamable $d0, 14 /* CC::al */, $noreg
-    ; CHECK: renamable $r3 = t2ADDrr killed renamable $r3, renamable $r3, 14 /* CC::al */, $noreg, $noreg
-    ; CHECK: renamable $s2 = VLDRS killed renamable $r0, 0, 14 /* CC::al */, $noreg
-    ; CHECK: VSTRS killed renamable $s2, killed renamable $r1, 0, 14 /* CC::al */, $noreg
-    ; CHECK: t2STRi12 killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
-    ; CHECK: renamable $r4 = t2ADDrr killed renamable $r4, renamable $r4, 14 /* CC::al */, $noreg, $noreg
-    ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: renamable $s2 = VLDRS killed renamable $r0, 0, 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: renamable $r3 = t2ADDrr killed renamable $r3, renamable $r3, 14 /* CC::al */, $noreg, $noreg
+    ; CHECK-NEXT: renamable $d0 = VADDD killed renamable $d0, renamable $d0, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    ; CHECK-NEXT: renamable $r4 = t2ADDrr killed renamable $r4, renamable $r4, 14 /* CC::al */, $noreg, $noreg
+    ; CHECK-NEXT: VSTRS killed renamable $s2, killed renamable $r1, 0, 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: t2STRi12 killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit killed $d0
     renamable $s2 = VLDRS killed renamable $r0, 0, 14 /* CC::al */, $noreg
-    renamable $d0 = VADDD killed renamable $d0, renamable $d0, 14 /* CC::al */, $noreg
+    renamable $d0 = VADDD killed renamable $d0, renamable $d0, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     VSTRS killed renamable $s2, killed renamable $r1, 0, 14 /* CC::al */, $noreg
     renamable $r3 = t2ADDrr killed renamable $r3, renamable $r3, 14 /* CC::al */, $noreg, $noreg
     t2STRi12 killed renamable $r3, killed renamable $r2, 0, 14 /* CC::al */, $noreg
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
index 8e671c903adda..f5b2e98b62fdd 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
@@ -81,7 +81,7 @@ body:             |
     STRi12 killed renamable $r1, killed renamable $r0, 0, 14, $noreg :: (volatile store (s32) into %ir.LL, align 8)
     dead renamable $r0 = SPACE 8920, undef renamable $r0
     renamable $s2 = VLDRH $sp, 1, 14, $noreg :: (volatile dereferenceable load (s16) from %ir.S)
-    renamable $s0 = VADDH killed renamable $s2, killed renamable $s0, 14, $noreg
+    renamable $s0 = VADDH killed renamable $s2, killed renamable $s0, 14, $noreg, implicit $fpscr_rm
     VSTRH renamable $s0, $sp, 1, 14, $noreg :: (volatile store (s16) into %ir.S)
     renamable $r0 = VMOVRH killed renamable $s0, 14, $noreg
     dead renamable $r1 = SPACE 1350, undef renamable $r0
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
index 03ddd80ed0ead..4b6647683139c 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
@@ -72,7 +72,7 @@ body:             |
     renamable $s2 = VLDRH $sp, 1, 14, $noreg :: (volatile dereferenceable load (s16) from %ir.S)
     renamable $s0 = VLDRH %const.1, 0, 14, $noreg :: (load (s16) from constant-pool)
     dead renamable $r0 = SPACE 1230, undef renamable $r0
-    renamable $s0 = VADDH killed renamable $s2, killed renamable $s0, 14, $noreg
+    renamable $s0 = VADDH killed renamable $s2, killed renamable $s0, 14, $noreg, implicit $fpscr_rm
     VSTRH renamable $s0, $sp, 1, 14, $noreg :: (volatile store (s16) into %ir.S)
     renamable $r0 = VMOVRH killed renamable $s0, 14, $noreg
     dead renamable $r1 = SPACE 1330, undef renamable $r0
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
index 46f028bd492db..c16a62a8a989c 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
@@ -89,7 +89,7 @@ body:             |
     $sp = frame-setup SUBri $sp, 4, 14, $noreg, $noreg
     frame-setup CFI_INSTRUCTION def_cfa_offset 4
     renamable $s0 = VLDRH %const.0, 0, 14, $noreg :: (load (s16) from constant-pool)
-    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv
+    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv, implicit $fpscr_rm
     VSTRH killed renamable $s0, $sp, 1, 14, $noreg :: (store (s16) into %ir.res)
     FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
     Bcc %bb.2, 0, killed $cpsr
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
index 5a03fcdb7fdf7..049b7d9b46139 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
@@ -95,7 +95,7 @@ body:             |
     $sp = frame-setup SUBri $sp, 4, 14, $noreg, $noreg
     frame-setup CFI_INSTRUCTION def_cfa_offset 4
     renamable $s0 = VLDRH %const.0, 0, 14, $noreg :: (load (s16) from constant-pool)
-    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv
+    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv, implicit $fpscr_rm
     VSTRH killed renamable $s0, $sp, 1, 14, $noreg :: (store (s16) into %ir.res)
     FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
     Bcc %bb.2, 0, killed $cpsr
diff --git a/llvm/test/CodeGen/ARM/fp16_fast_math.ll b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
index 165eb4b8af43e..47e1f84ff664e 100644
--- a/llvm/test/CodeGen/ARM/fp16_fast_math.ll
+++ b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
@@ -16,11 +16,11 @@ define half @normal_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:spr = COPY [[COPY1]]
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -33,7 +33,7 @@ define half @normal_fadd(half %x, half %y) {
   ; CHECK-FP16-NEXT:   [[COPY1:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
@@ -50,11 +50,11 @@ define half @fast_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:spr = COPY [[COPY1]]
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc nofpexcept VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -67,7 +67,7 @@ define half @fast_fadd(half %x, half %y) {
   ; CHECK-FP16-NEXT:   [[COPY1:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nnan ninf nsz arcp contract afn reassoc VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nnan ninf nsz arcp contract afn reassoc nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
@@ -84,11 +84,11 @@ define half @ninf_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:spr = COPY [[COPY1]]
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -101,7 +101,7 @@ define half @ninf_fadd(half %x, half %y) {
   ; CHECK-FP16-NEXT:   [[COPY1:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = ninf VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = ninf nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
@@ -122,19 +122,19 @@ define half @normal_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   [[COPY7:%[0-9]+]]:spr = COPY killed [[COPY6]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS3:%[0-9]+]]:spr = VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS3:%[0-9]+]]:spr = nofpexcept VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = nofpexcept VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH1:%[0-9]+]]:spr = VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH1:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY killed [[VCVTBSH1]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY8]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -148,9 +148,9 @@ define half @normal_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-FP16-NEXT:   [[COPY2:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = nofpexcept VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH1]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
@@ -169,14 +169,14 @@ define half @nnan_ninf_contract_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = nnan ninf contract nofpexcept VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf contract nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf contract nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VADDS]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = nnan ninf contract nofpexcept VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf contract nofpexcept VADDS killed [[VADDS]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS1]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY6]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -190,9 +190,9 @@ define half @nnan_ninf_contract_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-FP16-NEXT:   [[COPY2:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nnan ninf contract VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = nnan ninf contract nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = nnan ninf contract VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = nnan ninf contract nofpexcept VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH1]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
@@ -211,19 +211,19 @@ define half @ninf_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $r0
   ; CHECK-CVT-NEXT:   [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
   ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VCVTBHS1:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   [[COPY7:%[0-9]+]]:spr = COPY killed [[COPY6]]
-  ; CHECK-CVT-NEXT:   [[VCVTBHS3:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBHS3:%[0-9]+]]:spr = ninf nofpexcept VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg, implicit $fpscr
+  ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = ninf nofpexcept VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF
-  ; CHECK-CVT-NEXT:   [[VCVTBSH1:%[0-9]+]]:spr = VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+  ; CHECK-CVT-NEXT:   [[VCVTBSH1:%[0-9]+]]:spr = nofpexcept VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg, implicit $fpscr
   ; CHECK-CVT-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY killed [[VCVTBSH1]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY8]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
@@ -237,9 +237,9 @@ define half @ninf_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-FP16-NEXT:   [[COPY2:%[0-9]+]]:rgpr = COPY $r0
   ; CHECK-FP16-NEXT:   [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
   ; CHECK-FP16-NEXT:   [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = ninf VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH:%[0-9]+]]:hpr = ninf nofpexcept VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
-  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = ninf VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+  ; CHECK-FP16-NEXT:   [[VADDH1:%[0-9]+]]:hpr = ninf nofpexcept VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg, implicit $fpscr
   ; CHECK-FP16-NEXT:   $r0 = COPY [[VADDH1]]
   ; CHECK-FP16-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
 entry:
diff --git a/llvm/test/CodeGen/ARM/ipra-reg-usage.ll b/llvm/test/CodeGen/ARM/ipra-reg-usage.ll
index c92839020f832..90142cbf6bff9 100644
--- a/llvm/test/CodeGen/ARM/ipra-reg-usage.ll
+++ b/llvm/test/CodeGen/ARM/ipra-reg-usage.ll
@@ -6,7 +6,7 @@ target triple = "armv7-eabi"
 
 declare void @bar1()
 define void @foo()#0 {
-; CHECK: foo Clobbered Registers: $apsr $apsr_nzcv $cpsr $fpcxtns $fpcxts $fpexc $fpinst $fpscr $fpscr_nzcv $fpscr_nzcvqc $fpsid $itstate $pc $ra_auth_code $sp $spsr $vpr $zr $d0 $d1 $d2 $d3 $d4 $d5 $d6 $d7 $d16 $d17 $d18 $d19 $d20 $d21 $d22 $d23 $d24 $d25 $d26 $d27 $d28 $d29 $d30 $d31 $fpinst2 $mvfr0 $mvfr1 $mvfr2 $p0 $q0 $q1 $q2 $q3 $q8 $q9 $q10 $q11 $q12 $q13 $q14 $q15 $r0 $r1 $r2 $r3 $r12 $s0 $s1 $s2 $s3 $s4 $s5 $s6 $s7 $s8 $s9 $s10 $s11 $s12 $s13 $s14 $s15 $d0_d2 $d1_d3 $d2_d4 $d3_d5 $d4_d6 $d5_d7 $d6_d8 $d7_d9 $d14_d16 $d15_d17 $d16_d18 $d17_d19 $d18_d20 $d19_d21 $d20_d22 $d21_d23 $d22_d24 $d23_d25 $d24_d26 $d25_d27 $d26_d28 $d27_d29 $d28_d30 $d29_d31 $q0_q1 $q1_q2 $q2_q3 $q3_q4 $q7_q8 $q8_q9 $q9_q10 $q10_q11 $q11_q12 $q12_q13 $q13_q14 $q14_q15 $q0_q1_q2_q3 $q1_q2_q3_q4 $q2_q3_q4_q5 $q3_q4_q5_q6 $q5_q6_q7_q8 $q6_q7_q8_q9 $q7_q8_q9_q10 $q8_q9_q10_q11 $q9_q10_q11_q12 $q10_q11_q12_q13 $q11_q12_q13_q14 $q12_q13_q14_q15 $r0_r1 $r2_r3 $r12_sp $d0_d1_d2 $d1_d2_d3 $d2_d3_d4 $d3_d4_d5 $d4_d5_d6 $d5_d6_d7 $d6_d7_d8 $d7_d8_d9 $d14_d15_d16 $d15_d16_d17 $d16_d17_d18 $d17_d18_d19 $d18_d19_d20 $d19_d20_d21 $d20_d21_d22 $d21_d22_d23 $d22_d23_d24 $d23_d24_d25 $d24_d25_d26 $d25_d26_d27 $d26_d27_d28 $d27_d28_d29 $d28_d29_d30 $d29_d30_d31 $d0_d2_d4 $d1_d3_d5 $d2_d4_d6 $d3_d5_d7 $d4_d6_d8 $d5_d7_d9 $d6_d8_d10 $d7_d9_d11 $d12_d14_d16 $d13_d15_d17 $d14_d16_d18 $d15_d17_d19 $d16_d18_d20 $d17_d19_d21 $d18_d20_d22 $d19_d21_d23 $d20_d22_d24 $d21_d23_d25 $d22_d24_d26 $d23_d25_d27 $d24_d26_d28 $d25_d27_d29 $d26_d28_d30 $d27_d29_d31 $d0_d2_d4_d6 $d1_d3_d5_d7 $d2_d4_d6_d8 $d3_d5_d7_d9 $d4_d6_d8_d10 $d5_d7_d9_d11 $d6_d8_d10_d12 $d7_d9_d11_d13 $d10_d12_d14_d16 $d11_d13_d15_d17 $d12_d14_d16_d18 $d13_d15_d17_d19 $d14_d16_d18_d20 $d15_d17_d19_d21 $d16_d18_d20_d22 $d17_d19_d21_d23 $d18_d20_d22_d24 $d19_d21_d23_d25 $d20_d22_d24_d26 $d21_d23_d25_d27 $d22_d24_d26_d28 $d23_d25_d27_d29 $d24_d26_d28_d30 $d25_d27_d29_d31 $d1_d2 $d3_d4 $d5_d6 $d7_d8 $d15_d16 $d17_d18 $d19_d20 $d21_d22 $d23_d24 $d25_d26 
$d27_d28 $d29_d30 $d1_d2_d3_d4 $d3_d4_d5_d6 $d5_d6_d7_d8 $d7_d8_d9_d10 $d13_d14_d15_d16 $d15_d16_d17_d18 $d17_d18_d19_d20 $d19_d20_d21_d22 $d21_d22_d23_d24 $d23_d24_d25_d26 $d25_d26_d27_d28 $d27_d28_d29_d30
+; CHECK: foo Clobbered Registers: $apsr $apsr_nzcv $cpsr $fpcxtns $fpcxts $fpexc $fpinst $fpscr $fpscr_nzcv $fpscr_nzcvqc $fpscr_rm $fpsid $itstate $pc $ra_auth_code $sp $spsr $vpr $zr $d0 $d1 $d2 $d3 $d4 $d5 $d6 $d7 $d16 $d17 $d18 $d19 $d20 $d21 $d22 $d23 $d24 $d25 $d26 $d27 $d28 $d29 $d30 $d31 $fpinst2 $mvfr0 $mvfr1 $mvfr2 $p0 $q0 $q1 $q2 $q3 $q8 $q9 $q10 $q11 $q12 $q13 $q14 $q15 $r0 $r1 $r2 $r3 $r12 $s0 $s1 $s2 $s3 $s4 $s5 $s6 $s7 $s8 $s9 $s10 $s11 $s12 $s13 $s14 $s15 $d0_d2 $d1_d3 $d2_d4 $d3_d5 $d4_d6 $d5_d7 $d6_d8 $d7_d9 $d14_d16 $d15_d17 $d16_d18 $d17_d19 $d18_d20 $d19_d21 $d20_d22 $d21_d23 $d22_d24 $d23_d25 $d24_d26 $d25_d27 $d26_d28 $d27_d29 $d28_d30 $d29_d31 $q0_q1 $q1_q2 $q2_q3 $q3_q4 $q7_q8 $q8_q9 $q9_q10 $q10_q11 $q11_q12 $q12_q13 $q13_q14 $q14_q15 $q0_q1_q2_q3 $q1_q2_q3_q4 $q2_q3_q4_q5 $q3_q4_q5_q6 $q5_q6_q7_q8 $q6_q7_q8_q9 $q7_q8_q9_q10 $q8_q9_q10_q11 $q9_q10_q11_q12 $q10_q11_q12_q13 $q11_q12_q13_q14 $q12_q13_q14_q15 $r0_r1 $r2_r3 $r12_sp $d0_d1_d2 $d1_d2_d3 $d2_d3_d4 $d3_d4_d5 $d4_d5_d6 $d5_d6_d7 $d6_d7_d8 $d7_d8_d9 $d14_d15_d16 $d15_d16_d17 $d16_d17_d18 $d17_d18_d19 $d18_d19_d20 $d19_d20_d21 $d20_d21_d22 $d21_d22_d23 $d22_d23_d24 $d23_d24_d25 $d24_d25_d26 $d25_d26_d27 $d26_d27_d28 $d27_d28_d29 $d28_d29_d30 $d29_d30_d31 $d0_d2_d4 $d1_d3_d5 $d2_d4_d6 $d3_d5_d7 $d4_d6_d8 $d5_d7_d9 $d6_d8_d10 $d7_d9_d11 $d12_d14_d16 $d13_d15_d17 $d14_d16_d18 $d15_d17_d19 $d16_d18_d20 $d17_d19_d21 $d18_d20_d22 $d19_d21_d23 $d20_d22_d24 $d21_d23_d25 $d22_d24_d26 $d23_d25_d27 $d24_d26_d28 $d25_d27_d29 $d26_d28_d30 $d27_d29_d31 $d0_d2_d4_d6 $d1_d3_d5_d7 $d2_d4_d6_d8 $d3_d5_d7_d9 $d4_d6_d8_d10 $d5_d7_d9_d11 $d6_d8_d10_d12 $d7_d9_d11_d13 $d10_d12_d14_d16 $d11_d13_d15_d17 $d12_d14_d16_d18 $d13_d15_d17_d19 $d14_d16_d18_d20 $d15_d17_d19_d21 $d16_d18_d20_d22 $d17_d19_d21_d23 $d18_d20_d22_d24 $d19_d21_d23_d25 $d20_d22_d24_d26 $d21_d23_d25_d27 $d22_d24_d26_d28 $d23_d25_d27_d29 $d24_d26_d28_d30 $d25_d27_d29_d31 $d1_d2 $d3_d4 $d5_d6 $d7_d8 $d15_d16 $d17_d18 $d19_d20 $d21_d22 
$d23_d24 $d25_d26 $d27_d28 $d29_d30 $d1_d2_d3_d4 $d3_d4_d5_d6 $d5_d6_d7_d8 $d7_d8_d9_d10 $d13_d14_d15_d16 $d15_d16_d17_d18 $d17_d18_d19_d20 $d19_d20_d21_d22 $d21_d22_d23_d24 $d23_d24_d25_d26 $d25_d26_d27_d28 $d27_d28_d29_d30
   call void @bar1()
   call void @bar2()
   ret void
diff --git a/llvm/test/CodeGen/ARM/misched-prevent-erase-history-of-subunits.mir b/llvm/test/CodeGen/ARM/misched-prevent-erase-history-of-subunits.mir
index 46f3e4b08559a..17d66196505a8 100644
--- a/llvm/test/CodeGen/ARM/misched-prevent-erase-history-of-subunits.mir
+++ b/llvm/test/CodeGen/ARM/misched-prevent-erase-history-of-subunits.mir
@@ -14,7 +14,7 @@
 # CHECK: SU(1):   %1:dpr = VABSD %0:dpr, 14, $noreg
 # CHECK: SU(2):   %2:dpr = VLDRD %const.0, 0, 14, $noreg :: (load (s64) from constant-pool)
 # CHECK: SU(4):   %3:rgpr = t2MOVi 0, 14, $noreg, $noreg
-# CHECK: SU(3):   VCMPD %1:dpr, %2:dpr, 14, $noreg, implicit-def $fpscr_nzcv
+# CHECK: SU(3):   VCMPD %1:dpr, %2:dpr, 14, $noreg, implicit-def $fpscr_nzcv, implicit $fpscr_rm
 # CHECK: SU(5):   $r0 = COPY %3:rgpr
 ---
 name:            test
@@ -29,7 +29,7 @@ body:             |
     %0:dpr = COPY $d0
     %1:dpr = VABSD %0, 14 /* CC::al */, $noreg
     %2:dpr = VLDRD %const.0, 0, 14 /* CC::al */, $noreg :: (load (s64) from constant-pool)
-    VCMPD %1, %2, 14 /* CC::al */, $noreg, implicit-def $fpscr_nzcv
+    VCMPD %1, %2, 14 /* CC::al */, $noreg, implicit-def $fpscr_nzcv, implicit $fpscr_rm
     %4:rgpr = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
     $r0 = COPY %4
     tBX_RET 14 /* CC::al */, $noreg, implicit killed $r0
diff --git a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
index 8fa9337eae6cd..03cb8e37844c2 100644
--- a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
+++ b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
@@ -60,9 +60,9 @@ body:             |
     $sp = t2STMDB_UPD $sp, 14, $noreg, $r4, killed $r5, killed $r6, killed $r7, killed $r8, killed $r9, killed $r10, killed $r11
     $r4 = t2BICri $r4, 1, 14, $noreg, $noreg
     $sp = tSUBspi $sp, 34, 14, $noreg
-    VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
+    VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $fpscr_rm, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
     tBLXNSr 14, $noreg, killed $r4, csr_aapcs, implicit-def $lr, implicit $sp, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $q0, implicit-def $q1, implicit-def $q2, implicit-def $q3, implicit-def $q4, implicit-def $q5, implicit-def $q6, implicit-def $q7
-    VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
+    VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
     $sp = tADDspi $sp, 34, 14, $noreg
     $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11
     $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $pc
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/emptyblock.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/emptyblock.mir
index 021cb4c89f301..8abe5c58e6875 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/emptyblock.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/emptyblock.mir
@@ -8,7 +8,7 @@
 
 --- |
   %struct.DCT_InstanceTypeDef = type { ptr, i32, i32 }
-  
+
   ; Function Attrs: nofree nounwind
   define hidden arm_aapcs_vfpcc void @test(ptr nocapture readonly %S, ptr %pIn, ptr nocapture %pOut) {
   entry:
@@ -41,7 +41,7 @@
     %13 = call i32 @llvm.loop.decrement.reg.i32(i32 %8, i32 1)
     %14 = icmp ne i32 %13, 0
     br i1 %14, label %do.body, label %do.end
-  
+
   do.end:                                           ; preds = %do.body
     %15 = extractelement <4 x float> %11, i32 0
     %16 = extractelement <4 x float> %11, i32 1
@@ -56,7 +56,7 @@
     %sub4 = add i32 %1, -4
     %cmp5201 = icmp ugt i32 %sub4, 1
     br i1 %cmp5201, label %for.body.lr.ph, label %for.cond54.preheader
-  
+
   for.body.lr.ph:                                   ; preds = %do.end
     %scevgep = getelementptr float, ptr %pIn, i32 4
     %20 = add i32 %0, 4
@@ -161,7 +161,7 @@
     %63 = call i32 @llvm.loop.decrement.reg.i32(i32 %53, i32 1)
     %64 = icmp ne i32 %63, 0
     br i1 %64, label %do.body24, label %do.end33
-  
+
   do.end33:                                         ; preds = %do.body24
     %65 = bitcast ptr %lsr.iv27 to ptr
     %66 = bitcast ptr %lsr.iv20 to ptr
@@ -254,7 +254,7 @@
     %inc = add nuw i32 %k.1200, 1
     %exitcond.not = icmp eq i32 %inc, %1
     br i1 %exitcond.not, label %for.end72, label %for.body56
-  
+
   for.end72:                                        ; preds = %do.end66, %for.cond54.preheader
     ret void
   }
@@ -428,28 +428,28 @@ body:             |
     renamable $lr = t2LoopDec killed renamable $lr, 1
     t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
     tB %bb.2, 14 /* CC::al */, $noreg
-  
+
   bb.2.do.end:
     successors: %bb.3(0x40000000), %bb.7(0x40000000)
     liveins: $q0, $r2, $r3, $r4, $r5, $r11
-  
-    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
+
+    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r0, dead $cpsr = tSUBi3 renamable $r3, 4, 14 /* CC::al */, $noreg
     tSTRspi killed renamable $r3, $sp, 1, 14 /* CC::al */, $noreg :: (store (s32) into %stack.8)
-    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, renamable $s2, 14 /* CC::al */, $noreg
+    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, renamable $s2, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     tSTRspi renamable $r0, $sp, 8, 14 /* CC::al */, $noreg :: (store (s32) into %stack.1)
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0, implicit $fpscr_rm
     renamable $s2 = VLDRS renamable $r11, 0, 14 /* CC::al */, $noreg :: (load (s32) from %ir.2)
     tCMPi8 killed renamable $r0, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
     renamable $r0 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VMULS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VMULS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     VSTRS killed renamable $s0, renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pOut)
     t2Bcc %bb.7, 3 /* CC::lo */, killed $cpsr
-  
+
   bb.3.for.body.lr.ph:
     successors: %bb.4(0x80000000)
     liveins: $r0, $r2, $r4, $r5, $r11
-  
+
     renamable $r6 = t2ADDri renamable $r5, 16, 14 /* CC::al */, $noreg, $noreg
     renamable $r1, dead $cpsr = tSUBi3 renamable $r4, 4, 14 /* CC::al */, $noreg
     tSTRspi killed renamable $r6, $sp, 4, 14 /* CC::al */, $noreg :: (store (s32) into %stack.5)
@@ -523,26 +523,26 @@ body:             |
     renamable $lr = t2LoopDec killed renamable $lr, 1
     t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
     tB %bb.6, 14 /* CC::al */, $noreg
-  
+
   bb.6.do.end33:
     successors: %bb.4(0x7c000000), %bb.7(0x04000000)
     liveins: $q0, $q1, $q2, $q3, $r0, $r1, $r2, $r6, $r8, $r9, $r10, $r12
-  
-    renamable $s16 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s12, renamable $s13, 14 /* CC::al */, $noreg
-    renamable $s18 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s8, renamable $s9, 14 /* CC::al */, $noreg
-    renamable $s16 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s16, renamable $s14, 14 /* CC::al */, $noreg
-    renamable $s18 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s18, renamable $s10, 14 /* CC::al */, $noreg
-    renamable $s12 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s16, killed renamable $s15, 14 /* CC::al */, $noreg, implicit $q3
-    renamable $s8 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s18, killed renamable $s11, 14 /* CC::al */, $noreg, implicit $q2
-    renamable $s10 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s4, renamable $s5, 14 /* CC::al */, $noreg
-    renamable $s14 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
+
+    renamable $s16 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s12, renamable $s13, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s18 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s8, renamable $s9, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s16 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s16, renamable $s14, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s18 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s18, renamable $s10, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s12 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s16, killed renamable $s15, 14 /* CC::al */, $noreg, implicit $q3, implicit $fpscr_rm
+    renamable $s8 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s18, killed renamable $s11, 14 /* CC::al */, $noreg, implicit $q2, implicit $fpscr_rm
+    renamable $s10 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s4, renamable $s5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s14 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r7 = tLDRspi $sp, 9, 14 /* CC::al */, $noreg :: (load (s32) from %stack.0)
-    renamable $s10 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s10, renamable $s6, 14 /* CC::al */, $noreg
-    renamable $s14 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s14, renamable $s2, 14 /* CC::al */, $noreg
+    renamable $s10 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s10, renamable $s6, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s14 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s14, renamable $s2, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r3 = t2ADDrs renamable $r2, renamable $r0, 18, 14 /* CC::al */, $noreg, $noreg
     renamable $r7 = t2ADDrs renamable $r2, killed renamable $r7, 18, 14 /* CC::al */, $noreg, $noreg
-    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s10, killed renamable $s7, 14 /* CC::al */, $noreg, implicit $q1
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s14, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
+    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s10, killed renamable $s7, 14 /* CC::al */, $noreg, implicit $q1, implicit $fpscr_rm
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s14, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0, implicit $fpscr_rm
     VSTRS killed renamable $s12, killed renamable $r3, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.arrayidx37)
     VSTRS killed renamable $s8, killed renamable $r7, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.arrayidx42)
     renamable $r3 = t2ADDrs renamable $r2, killed renamable $r8, 18, 14 /* CC::al */, $noreg, $noreg
@@ -597,7 +597,7 @@ body:             |
   bb.13:
     successors: %bb.10(0x80000000)
     liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r11, $r12
-  
+
   bb.10.do.body59 (align 4):
     successors: %bb.10(0x7c000000), %bb.11(0x04000000)
     liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r7, $r11, $r12
@@ -611,20 +611,20 @@ body:             |
     renamable $lr = t2LoopDec killed renamable $lr, 1
     t2LoopEnd renamable $lr, %bb.10, implicit-def dead $cpsr
     tB %bb.11, 14 /* CC::al */, $noreg
-  
+
   bb.11.do.end66:
     successors: %bb.12(0x04000000), %bb.9(0x7c000000)
     liveins: $q0, $r0, $r2, $r3, $r4, $r5, $r11, $r12
-  
-    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg
+
+    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s0, renamable $s1, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r1 = t2ADDrs renamable $r2, renamable $r0, 18, 14 /* CC::al */, $noreg, $noreg
-    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, renamable $s2, 14 /* CC::al */, $noreg
+    renamable $s4 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, renamable $s2, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r0, dead $cpsr = nuw tADDi8 killed renamable $r0, 1, 14 /* CC::al */, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s3, 14 /* CC::al */, $noreg, implicit $q0, implicit $fpscr_rm
     tCMPhir renamable $r0, renamable $r12, 14 /* CC::al */, $noreg, implicit-def $cpsr
     VSTRS killed renamable $s0, killed renamable $r1, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.arrayidx70)
     tBcc %bb.9, 1 /* CC::ne */, killed $cpsr
-  
+
   bb.12.for.end72:
     $sp = frame-destroy tADDspi $sp, 10, 14 /* CC::al */, $noreg
     $sp = frame-destroy VLDMDIA_UPD $sp, 14 /* CC::al */, $noreg, def $d8, def $d9, def $d10, def $d11
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
index 31e88ea49a1a0..85b826a775c93 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
@@ -185,15 +185,15 @@ body:             |
     successors: %bb.5(0x80000000)
     liveins: $q0, $r0, $r1, $r2, $r4
 
-    renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14, $noreg
+    renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14, $noreg, implicit $fpscr_rm
     $lr = tMOVr $r4, 14, $noreg
     $r3 = tMOVr $r1, 14, $noreg
-    renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14, $noreg
-    renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14, $noreg, implicit $q0
+    renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14, $noreg, implicit $fpscr_rm
+    renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14, $noreg, implicit $q0, implicit $fpscr_rm
     $s2 = VMOVSR $r1, 14, $noreg
     renamable $s2 = VUITOS killed renamable $s2, 14, $noreg
     $lr = t2DoLoopStart killed $r4
-    renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14, $noreg
+    renamable $s4 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14, $noreg, implicit $fpscr_rm
     renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
 
   bb.5:
@@ -215,13 +215,13 @@ body:             |
   bb.6:
     liveins: $q0, $r1, $r2
 
-    renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14, $noreg
+    renamable $s4 = nnan ninf nsz VADDS renamable $s0, renamable $s1, 14, $noreg, implicit $fpscr_rm
     renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14, $noreg
-    renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14, $noreg
-    renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14, $noreg, implicit $q0
+    renamable $s4 = nnan ninf nsz VADDS renamable $s2, killed renamable $s4, 14, $noreg, implicit $fpscr_rm
+    renamable $s0 = nnan ninf nsz VADDS killed renamable $s3, killed renamable $s4, 14, $noreg, implicit $q0, implicit $fpscr_rm
     $s2 = VMOVSR killed $r0, 14, $noreg
     renamable $s2 = VUITOS killed renamable $s2, 14, $noreg
-    renamable $s0 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14, $noreg
+    renamable $s0 = nnan ninf nsz VDIVS killed renamable $s0, killed renamable $s2, 14, $noreg, implicit $fpscr_rm
     VSTRS killed renamable $s0, killed renamable $r2, 0, 14, $noreg
     tPOP_RET 14, $noreg, def $r4, def $pc
 
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
index f5da7acb3e970..780831c8faeb7 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
@@ -232,9 +232,9 @@ body:             |
   bb.3.middle.block:
     liveins: $q1
 
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
-    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1, implicit $fpscr_rm
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
     tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
 
@@ -376,9 +376,9 @@ body:             |
   bb.3.middle.block:
     liveins: $q1
 
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
-    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1, implicit $fpscr_rm
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
     tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
 
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
index c331612faf31f..5dcd0a195a830 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dlstp.mir
@@ -240,10 +240,10 @@ body:             |
 
     $s4 = VMOVSR $r1, 14 /* CC::al */, $noreg
     $lr = tMOVr $r4, 14 /* CC::al */, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0, implicit $fpscr_rm
     $lr = t2DoLoopStart killed $r4
     renamable $s4 = VUITOS killed renamable $s4, 14 /* CC::al */, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s4, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     renamable $r3 = VMOVRS killed renamable $s0, 14 /* CC::al */, $noreg
     renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q0
     renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q1
@@ -267,10 +267,10 @@ body:             |
     liveins: $q0, $r1, $r2
 
     renamable $r0, dead $cpsr = tSUBi3 killed renamable $r1, 1, 14 /* CC::al */, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s3, renamable $s3, 14 /* CC::al */, $noreg, implicit $q0, implicit $fpscr_rm
     $s2 = VMOVSR killed $r0, 14 /* CC::al */, $noreg
     renamable $s2 = VUITOS killed renamable $s2, 14 /* CC::al */, $noreg
-    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg
+    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VDIVS killed renamable $s0, killed renamable $s2, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     VSTRS killed renamable $s0, killed renamable $r2, 0, 14 /* CC::al */, $noreg :: (store (s32) into %ir.pResult)
     frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
 
diff --git a/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir b/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
index 522120559c8b0..d9d2f254909bb 100644
--- a/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
+++ b/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
@@ -96,7 +96,7 @@ body:             |
   ; CHECK-NEXT: bb.6.for.body:
   ; CHECK-NEXT:   successors: %bb.7(0x80000000), %bb.8(0x00000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], %30, 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], %30, 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2ADDri2:%[0-9]+]]:rgpr = t2ADDri [[COPY7]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS4:%[0-9]+]]:spr = VLDRS [[COPY7]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[t2ADDri3:%[0-9]+]]:rgpr = t2ADDri [[COPY6]], 4, 14 /* CC::al */, $noreg, $noreg
@@ -119,13 +119,13 @@ body:             |
   ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:spr = PHI [[VLDRS5]], %bb.6, %47, %bb.7
   ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:spr = PHI %40, %bb.6, %55, %bb.7
   ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, %45, %bb.7
-  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[PHI4]], [[PHI5]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[PHI4]], [[PHI5]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri4:%[0-9]+]]:rgpr = t2SUBri [[PHI2]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[VLDRS6:%[0-9]+]]:spr = VLDRS [[PHI1]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
   ; CHECK-NEXT:   [[VLDRS7:%[0-9]+]]:spr = VLDRS [[PHI]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[t2ADDri4:%[0-9]+]]:rgpr = t2ADDri [[PHI]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[t2ADDri5:%[0-9]+]]:rgpr = t2ADDri [[PHI1]], 4, 14 /* CC::al */, $noreg, $noreg
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI6]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI6]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:gpr = COPY [[t2ADDri4]]
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:gpr = COPY [[t2ADDri5]]
   ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:gpr = COPY [[t2SUBri4]]
@@ -140,7 +140,7 @@ body:             |
   ; CHECK-NEXT:   [[PHI8:%[0-9]+]]:spr = PHI [[VLDRS5]], %bb.6, [[VLDRS6]], %bb.7
   ; CHECK-NEXT:   [[PHI9:%[0-9]+]]:spr = PHI %40, %bb.6, %55, %bb.7
   ; CHECK-NEXT:   [[PHI10:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[VMULS1]], %bb.7
-  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI7]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI7]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.9:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
@@ -148,8 +148,8 @@ body:             |
   ; CHECK-NEXT:   [[PHI11:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.5, [[VADDS1]], %bb.8
   ; CHECK-NEXT:   [[PHI12:%[0-9]+]]:spr = PHI [[VLDRS3]], %bb.5, [[PHI8]], %bb.8
   ; CHECK-NEXT:   [[PHI13:%[0-9]+]]:spr = PHI %30, %bb.5, [[PHI9]], %bb.8
-  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[PHI12]], [[PHI13]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS2]], [[PHI11]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[PHI12]], [[PHI13]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS2]], [[PHI11]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.for.end:
@@ -194,8 +194,8 @@ body:             |
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
     INLINEASM &nop, 0 /* attdialect */, 196618 /* regdef:SPR */, def %25, 2147483657 /* reguse tiedto:$0 */, %19(tied-def 3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %25, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %25, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %23:rgpr = t2SUBri %4, 1, 14 /* CC::al */, $noreg, def $cpsr
     %7:gpr = COPY %23
     %8:gpr = COPY %20
diff --git a/llvm/test/CodeGen/Thumb2/scavenge-lr.mir b/llvm/test/CodeGen/Thumb2/scavenge-lr.mir
index 5513bed542831..bfe55a5bc3cb4 100644
--- a/llvm/test/CodeGen/Thumb2/scavenge-lr.mir
+++ b/llvm/test/CodeGen/Thumb2/scavenge-lr.mir
@@ -147,10 +147,10 @@ body:             |
     $q5 = VLD1q64 $r3, 16, 14 /* CC::al */, $noreg :: (load (s128) from %ir.zzz..sroa_cast241, align 32)
     $q1 = VMLAfq killed $q1, $q5, killed $q8, 14 /* CC::al */, $noreg
     $s8 = VLDRS %const.0, 0, 14 /* CC::al */, $noreg :: (load (s32) from constant-pool)
-    $s3 = VDIVS $s8, $s7, 14 /* CC::al */, $noreg, implicit-def $q0
-    $s2 = VDIVS $s8, $s6, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0
-    $s1 = VDIVS $s8, $s5, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0
-    $s0 = VDIVS $s8, $s4, 14 /* CC::al */, $noreg, implicit killed $q1, implicit killed $q0, implicit-def $q0
+    $s3 = VDIVS $s8, $s7, 14 /* CC::al */, $noreg, implicit-def $q0, implicit $fpscr_rm
+    $s2 = VDIVS $s8, $s6, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0, implicit $fpscr_rm
+    $s1 = VDIVS $s8, $s5, 14 /* CC::al */, $noreg, implicit killed $q0, implicit-def $q0, implicit $fpscr_rm
+    $s0 = VDIVS $s8, $s4, 14 /* CC::al */, $noreg, implicit killed $q1, implicit killed $q0, implicit-def $q0, implicit $fpscr_rm
     $r7 = t2SUBri $r0, 64, 14 /* CC::al */, $noreg, $noreg
     $q8 = VLD1q64 $r7, 16, 14 /* CC::al */, $noreg :: (load (s128) from %ir.yyy..sroa_cast244, align 32)
     VSTMQIA $q8, %stack.1, 14 /* CC::al */, $noreg :: (store (s128) into %stack.1)
@@ -185,10 +185,10 @@ body:             |
     $r3 = VST1q32wb_fixed killed $r3, 16, killed $q10, 14 /* CC::al */, $noreg :: (store (s128) into %ir.zzz..sroa_cast241, align 32)
     $q10 = VLD1q64 $r3, 16, 14 /* CC::al */, $noreg :: (load (s128) from %ir.zzz..sroa_cast241 + 16, basealign 32)
     $q1 = VMLAfq killed $q1, $q10, killed $q8, 14 /* CC::al */, $noreg
-    $s23 = VDIVS $s8, $s7, 14 /* CC::al */, $noreg, implicit-def $q5
-    $s22 = VDIVS $s8, $s6, 14 /* CC::al */, $noreg, implicit killed $q5, implicit-def $q5
-    $s21 = VDIVS $s8, $s5, 14 /* CC::al */, $noreg, implicit killed $q5, implicit-def $q5
-    $s20 = VDIVS killed $s8, $s4, 14 /* CC::al */, $noreg, implicit killed $q1, implicit killed $q5, implicit-def $q5
+    $s23 = VDIVS $s8, $s7, 14 /* CC::al */, $noreg, implicit-def $q5, implicit $fpscr_rm
+    $s22 = VDIVS $s8, $s6, 14 /* CC::al */, $noreg, implicit killed $q5, implicit-def $q5, implicit $fpscr_rm
+    $s21 = VDIVS $s8, $s5, 14 /* CC::al */, $noreg, implicit killed $q5, implicit-def $q5, implicit $fpscr_rm
+    $s20 = VDIVS killed $s8, $s4, 14 /* CC::al */, $noreg, implicit killed $q1, implicit killed $q5, implicit-def $q5, implicit $fpscr_rm
     VST1q64 killed $r5, 16, $q5, 14 /* CC::al */, $noreg :: (store (s128) into %ir.xxx..sroa_cast248 + 16, basealign 32)
     VST1q64 killed $r6, 16, $q5, 14 /* CC::al */, $noreg :: (store (s128) into %ir.vvv..sroa_cast230 + 16, basealign 32)
     $q8 = VLDMQIA %stack.0, 14 /* CC::al */, $noreg :: (load (s128) from %stack.0)
diff --git a/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir b/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
index ba1004515fd54..20f044abb7e83 100644
--- a/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
@@ -83,7 +83,7 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS2:%[0-9]+]]:spr = VLDRS [[COPY4]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
   ; CHECK-NEXT:   [[t2ADDri1:%[0-9]+]]:rgpr = t2ADDri [[COPY3]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS3:%[0-9]+]]:spr = VLDRS [[COPY3]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri2:%[0-9]+]]:rgpr = t2SUBri [[COPY]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gprnopc = COPY [[t2SUBri2]]
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gprnopc = COPY [[t2ADDri1]]
@@ -98,7 +98,7 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS4:%[0-9]+]]:spr = VLDRS [[COPY7]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[t2ADDri3:%[0-9]+]]:rgpr = t2ADDri [[COPY6]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS5:%[0-9]+]]:spr = VLDRS [[COPY6]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
-  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS5]], [[VLDRS4]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS5]], [[VLDRS4]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri3:%[0-9]+]]:rgpr = t2SUBri [[COPY5]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY [[t2SUBri3]]
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:gpr = COPY [[t2ADDri3]]
@@ -115,7 +115,7 @@ body:             |
   ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, %43, %bb.7
   ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:spr = PHI [[VMULS1]], %bb.6, %52, %bb.7
   ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[PHI4]], %bb.7
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri4:%[0-9]+]]:rgpr = t2SUBri [[PHI2]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[VLDRS6:%[0-9]+]]:spr = VLDRS [[PHI1]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
   ; CHECK-NEXT:   [[VLDRS7:%[0-9]+]]:spr = VLDRS [[PHI]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
@@ -124,7 +124,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:gpr = COPY [[t2ADDri4]]
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:gpr = COPY [[t2ADDri5]]
   ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:gpr = COPY [[t2SUBri4]]
-  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS6]], [[VLDRS7]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS6]], [[VLDRS7]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2Bcc %bb.8, 0 /* CC::eq */, $cpsr
   ; CHECK-NEXT:   t2B %bb.7, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
@@ -134,14 +134,14 @@ body:             |
   ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, [[VADDS]], %bb.7
   ; CHECK-NEXT:   [[PHI7:%[0-9]+]]:spr = PHI [[VMULS1]], %bb.6, [[VMULS2]], %bb.7
   ; CHECK-NEXT:   [[PHI8:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[PHI4]], %bb.7
-  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI8]], [[PHI6]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI8]], [[PHI6]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.9:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI9:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.5, [[VADDS1]], %bb.8
   ; CHECK-NEXT:   [[PHI10:%[0-9]+]]:spr = PHI [[VMULS]], %bb.5, [[PHI7]], %bb.8
-  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI9]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI9]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.for.end:
@@ -185,8 +185,8 @@ body:             |
     %19:spr = VLDRS %2, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %23:rgpr = t2SUBri %4, 1, 14 /* CC::al */, $noreg, def $cpsr
     %7:gpr = COPY %23
     %8:gpr = COPY %20
diff --git a/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir b/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
index 854c5b8249328..177c94ecd6b8c 100644
--- a/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
@@ -84,7 +84,7 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS2:%[0-9]+]]:spr = VLDRS [[COPY4]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
   ; CHECK-NEXT:   [[t2ADDri1:%[0-9]+]]:rgpr = t2ADDri [[COPY3]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS3:%[0-9]+]]:spr = VLDRS [[COPY3]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gprlr = COPY [[t2DoLoopStart]]
   ; CHECK-NEXT:   [[t2LoopDec:%[0-9]+]]:gprlr = t2LoopDec [[COPY5]], 1
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[t2LoopDec]]
@@ -110,8 +110,8 @@ body:             |
   ; CHECK-NEXT:   [[t2ADDri3:%[0-9]+]]:rgpr = t2ADDri [[PHI1]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:gpr = COPY [[t2ADDri2]]
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:gpr = COPY [[t2ADDri3]]
-  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS4]], [[VLDRS5]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI4]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS4]], [[VLDRS5]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI4]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:gpr = COPY [[t2LoopDec1]]
   ; CHECK-NEXT:   t2LoopEnd [[t2LoopDec1]], %bb.6, implicit-def $cpsr
   ; CHECK-NEXT:   t2B %bb.7, 14 /* CC::al */, $noreg
@@ -121,7 +121,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.5, [[VADDS]], %bb.6
   ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:spr = PHI [[VMULS]], %bb.5, [[VMULS1]], %bb.6
-  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI6]], [[PHI5]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI6]], [[PHI5]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.for.end:
@@ -166,8 +166,8 @@ body:             |
     %19:spr = VLDRS %2, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %42:gprlr = COPY %4
     %23:gprlr = t2LoopDec %42:gprlr, 1
     %7:gpr = COPY %23
diff --git a/llvm/test/CodeGen/Thumb2/swp-fixedii.mir b/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
index dd02703c4b2a3..79397174d753c 100644
--- a/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
@@ -83,7 +83,7 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS2:%[0-9]+]]:spr = VLDRS [[COPY4]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
   ; CHECK-NEXT:   [[t2ADDri1:%[0-9]+]]:rgpr = t2ADDri [[COPY3]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS3:%[0-9]+]]:spr = VLDRS [[COPY3]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri2:%[0-9]+]]:rgpr = t2SUBri [[COPY]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gprnopc = COPY [[t2SUBri2]]
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gprnopc = COPY [[t2ADDri1]]
@@ -98,7 +98,7 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS4:%[0-9]+]]:spr = VLDRS [[COPY7]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[t2ADDri3:%[0-9]+]]:rgpr = t2ADDri [[COPY6]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS5:%[0-9]+]]:spr = VLDRS [[COPY6]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
-  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS5]], [[VLDRS4]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS5]], [[VLDRS4]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri3:%[0-9]+]]:rgpr = t2SUBri [[COPY5]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY [[t2SUBri3]]
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:gpr = COPY [[t2ADDri3]]
@@ -115,7 +115,7 @@ body:             |
   ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, %43, %bb.7
   ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:spr = PHI [[VMULS1]], %bb.6, %52, %bb.7
   ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[PHI4]], %bb.7
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri4:%[0-9]+]]:rgpr = t2SUBri [[PHI2]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[VLDRS6:%[0-9]+]]:spr = VLDRS [[PHI1]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
   ; CHECK-NEXT:   [[VLDRS7:%[0-9]+]]:spr = VLDRS [[PHI]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
@@ -124,7 +124,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:gpr = COPY [[t2ADDri4]]
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:gpr = COPY [[t2ADDri5]]
   ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:gpr = COPY [[t2SUBri4]]
-  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS6]], [[VLDRS7]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS6]], [[VLDRS7]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2Bcc %bb.7, 1 /* CC::ne */, $cpsr
   ; CHECK-NEXT:   t2B %bb.8, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
@@ -134,14 +134,14 @@ body:             |
   ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, [[VADDS]], %bb.7
   ; CHECK-NEXT:   [[PHI7:%[0-9]+]]:spr = PHI [[VMULS1]], %bb.6, [[VMULS2]], %bb.7
   ; CHECK-NEXT:   [[PHI8:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[PHI4]], %bb.7
-  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI8]], [[PHI6]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI8]], [[PHI6]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.9:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI9:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.5, [[VADDS1]], %bb.8
   ; CHECK-NEXT:   [[PHI10:%[0-9]+]]:spr = PHI [[VMULS]], %bb.5, [[PHI7]], %bb.8
-  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI9]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI10]], [[PHI9]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.for.end:
@@ -185,8 +185,8 @@ body:             |
     %19:spr = VLDRS %2, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %23:rgpr = t2SUBri %4, 1, 14 /* CC::al */, $noreg, def $cpsr
     %7:gpr = COPY %23
     %8:gpr = COPY %20
diff --git a/llvm/test/CodeGen/Thumb2/swp-regpressure.mir b/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
index 2bcb0c92909e5..955b53dfbf3ad 100644
--- a/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
@@ -148,8 +148,8 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS2:%[0-9]+]]:spr = VLDRS [[PHI]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
   ; CHECK-NEXT:   [[t2ADDri1:%[0-9]+]]:rgpr = t2ADDri [[PHI1]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS3:%[0-9]+]]:spr = VLDRS [[PHI1]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri2:%[0-9]+]]:rgpr = t2SUBri [[PHI2]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gpr = COPY [[t2SUBri2]]
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[t2ADDri1]]
@@ -236,8 +236,8 @@ body:             |
     %19:spr = VLDRS %2, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %23:rgpr = t2SUBri %4, 1, 14 /* CC::al */, $noreg, def $cpsr
     %7:gpr = COPY %23
     %8:gpr = COPY %20
@@ -314,24 +314,24 @@ body:             |
   ; CHECK-NEXT:   [[t2SUBri2:%[0-9]+]]:rgpr = t2SUBri [[COPY]], 1, 14 /* CC::al */, $noreg, def $cpsr
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gprnopc = COPY [[t2SUBri2]]
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %66:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %67:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %68:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %69:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %70:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %71:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %72:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %73:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %74:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %75:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %76:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %77:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %78:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %79:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %80:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %81:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %82:rgpr = COPY [[COPY4]]
-  ; CHECK-NEXT:   dead %83:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY7:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY8:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY9:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY10:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY11:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY12:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY13:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY14:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY15:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY16:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY17:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY18:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY19:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY20:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY21:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY22:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY23:%[0-9]+]]:rgpr = COPY [[COPY4]]
+  ; CHECK-NEXT:   dead [[COPY24:%[0-9]+]]:rgpr = COPY [[COPY4]]
   ; CHECK-NEXT:   t2Bcc %bb.9, 0 /* CC::eq */, $cpsr
   ; CHECK-NEXT:   t2B %bb.6, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
@@ -342,82 +342,82 @@ body:             |
   ; CHECK-NEXT:   [[VLDRS2:%[0-9]+]]:spr = VLDRS [[COPY4]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
   ; CHECK-NEXT:   [[t2ADDri1:%[0-9]+]]:rgpr = t2ADDri [[COPY3]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[VLDRS3:%[0-9]+]]:spr = VLDRS [[COPY3]], 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:gpr = COPY [[t2ADDri1]]
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY [[t2ADDri]]
+  ; CHECK-NEXT:   [[VMULS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS3]], [[VLDRS2]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   [[COPY25:%[0-9]+]]:gpr = COPY [[t2ADDri1]]
+  ; CHECK-NEXT:   [[COPY26:%[0-9]+]]:gpr = COPY [[t2ADDri]]
   ; CHECK-NEXT:   [[t2SUBri3:%[0-9]+]]:rgpr = t2SUBri [[COPY5]], 1, 14 /* CC::al */, $noreg, def $cpsr
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:gpr = COPY [[t2SUBri3]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %94:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %95:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %96:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %97:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %98:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %99:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %100:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %101:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %102:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %103:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %104:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %105:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %106:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %107:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %108:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %109:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %110:rgpr = COPY [[COPY6]]
-  ; CHECK-NEXT:   dead %111:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY27:%[0-9]+]]:gpr = COPY [[t2SUBri3]]
+  ; CHECK-NEXT:   [[COPY28:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY29:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY30:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY31:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY32:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY33:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY34:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY35:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY36:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY37:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY38:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY39:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY40:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY41:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY42:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY43:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY44:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY45:%[0-9]+]]:rgpr = COPY [[COPY6]]
+  ; CHECK-NEXT:   dead [[COPY46:%[0-9]+]]:rgpr = COPY [[COPY6]]
   ; CHECK-NEXT:   t2Bcc %bb.8, 0 /* CC::eq */, $cpsr
   ; CHECK-NEXT:   t2B %bb.7, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.7.for.body:
   ; CHECK-NEXT:   successors: %bb.8(0x04000000), %bb.7(0x7c000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gprnopc = PHI [[COPY8]], %bb.6, %116, %bb.7
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gprnopc = PHI [[COPY7]], %bb.6, %117, %bb.7
-  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:gprnopc = PHI [[COPY9]], %bb.6, %140, %bb.7
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gprnopc = PHI [[COPY26]], %bb.6, %116, %bb.7
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gprnopc = PHI [[COPY25]], %bb.6, %117, %bb.7
+  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:gprnopc = PHI [[COPY27]], %bb.6, %140, %bb.7
   ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, %137, %bb.7
-  ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:gprnopc = PHI [[COPY10]], %bb.6, %139, %bb.7
+  ; CHECK-NEXT:   [[PHI4:%[0-9]+]]:gprnopc = PHI [[COPY28]], %bb.6, %139, %bb.7
   ; CHECK-NEXT:   [[PHI5:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, %118, %bb.7
   ; CHECK-NEXT:   [[VLDRS4:%[0-9]+]]:spr = VLDRS [[PHI1]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
   ; CHECK-NEXT:   [[VLDRS5:%[0-9]+]]:spr = VLDRS [[PHI]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[t2ADDri2:%[0-9]+]]:rgpr = t2ADDri [[PHI]], 4, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-NEXT:   [[t2ADDri3:%[0-9]+]]:rgpr = t2ADDri [[PHI1]], 4, 14 /* CC::al */, $noreg, $noreg
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:gpr = COPY [[t2ADDri2]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:gpr = COPY [[t2ADDri3]]
-  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS4]], [[VLDRS5]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   dead %119:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %120:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %121:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %122:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %123:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %124:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %125:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %126:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %127:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %128:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %129:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %130:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %131:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %132:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %133:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %134:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %135:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   dead %136:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[COPY47:%[0-9]+]]:gpr = COPY [[t2ADDri2]]
+  ; CHECK-NEXT:   [[COPY48:%[0-9]+]]:gpr = COPY [[t2ADDri3]]
+  ; CHECK-NEXT:   [[VMULS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS4]], [[VLDRS5]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   dead [[COPY49:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY50:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY51:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY52:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY53:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY54:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY55:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY56:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY57:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY58:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY59:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY60:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY61:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY62:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY63:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY64:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY65:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   dead [[COPY66:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI5]], [[PHI3]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   [[t2SUBri4:%[0-9]+]]:rgpr = t2SUBri [[PHI2]], 1, 14 /* CC::al */, $noreg, def $cpsr
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:rgpr = COPY [[PHI4]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:gpr = COPY [[t2SUBri4]]
+  ; CHECK-NEXT:   [[COPY67:%[0-9]+]]:rgpr = COPY [[PHI4]]
+  ; CHECK-NEXT:   [[COPY68:%[0-9]+]]:gpr = COPY [[t2SUBri4]]
   ; CHECK-NEXT:   t2Bcc %bb.7, 1 /* CC::ne */, $cpsr
   ; CHECK-NEXT:   t2B %bb.8, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.8:
   ; CHECK-NEXT:   successors: %bb.9(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:gprnopc = PHI [[COPY8]], %bb.6, [[COPY11]], %bb.7
-  ; CHECK-NEXT:   [[PHI7:%[0-9]+]]:gprnopc = PHI [[COPY7]], %bb.6, [[COPY12]], %bb.7
+  ; CHECK-NEXT:   [[PHI6:%[0-9]+]]:gprnopc = PHI [[COPY26]], %bb.6, [[COPY47]], %bb.7
+  ; CHECK-NEXT:   [[PHI7:%[0-9]+]]:gprnopc = PHI [[COPY25]], %bb.6, [[COPY48]], %bb.7
   ; CHECK-NEXT:   [[PHI8:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.6, [[VADDS]], %bb.7
   ; CHECK-NEXT:   [[PHI9:%[0-9]+]]:spr = PHI [[VMULS]], %bb.6, [[VMULS1]], %bb.7
-  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI9]], [[PHI8]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[PHI9]], [[PHI8]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.9:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
@@ -427,8 +427,8 @@ body:             |
   ; CHECK-NEXT:   [[PHI12:%[0-9]+]]:spr = PHI [[VLDRS1]], %bb.5, [[VADDS1]], %bb.8
   ; CHECK-NEXT:   [[VLDRS6:%[0-9]+]]:spr = VLDRS [[PHI10]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep7, align 4)
   ; CHECK-NEXT:   [[VLDRS7:%[0-9]+]]:spr = VLDRS [[PHI11]], 1, 14 /* CC::al */, $noreg :: (load unknown-size from %ir.scevgep3, align 4)
-  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS7]], [[VLDRS6]], 14 /* CC::al */, $noreg
-  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS2]], [[PHI12]], 14 /* CC::al */, $noreg
+  ; CHECK-NEXT:   [[VMULS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VMULS [[VLDRS7]], [[VLDRS6]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
+  ; CHECK-NEXT:   [[VADDS2:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS [[VMULS2]], [[PHI12]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
   ; CHECK-NEXT:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.4.for.end:
@@ -491,8 +491,8 @@ body:             |
     %19:spr = VLDRS %2, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep7)
     %20:rgpr = t2ADDri %3, 4, 14 /* CC::al */, $noreg, $noreg
     %21:spr = VLDRS %3, 1, 14 /* CC::al */, $noreg :: (load (s32) from %ir.scevgep3)
-    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg
-    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg
+    %22:spr = nnan ninf nsz arcp contract afn reassoc VMULS killed %21, killed %19, 14 /* CC::al */, $noreg, implicit $fpscr_rm
+    %6:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed %22, %5, 14 /* CC::al */, $noreg, implicit $fpscr_rm
     %23:rgpr = t2SUBri %4, 1, 14 /* CC::al */, $noreg, def $cpsr
     %7:gpr = COPY %23
     %8:gpr = COPY %20

>From 86bd15299ce38e101d66eae5507f71362fdc843d Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Mon, 29 Sep 2025 18:50:40 +0300
Subject: [PATCH 2/3] fixup: remove unnecessary FPSCR_RM usage

---
 llvm/lib/Target/ARM/ARMInstrVFP.td              | 12 ++++++------
 llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index bc51e99412422..d4a7f06ec332d 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -338,7 +338,7 @@ def : MnemonicAlias<"vstm", "vstmia">;
 
 def VLLDM : AXSI4FR<"vlldm${p}\t$Rn, $regs", 0, 1>,
             Requires<[HasV8MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
 // T1: assembly does not contains the register list.
@@ -348,7 +348,7 @@ def : InstAlias<"vlldm${p}\t$Rn", (VLLDM GPRnopc:$Rn, pred:$p, 0)>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
             Requires<[HasV8_1MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
+    let Defs = [VPR, FPSCR, FPSCR_NZCV, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
                                         D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
@@ -356,8 +356,8 @@ def VLLDM_T2 : AXSI4FR<"vlldm${p}\t$Rn, $regs", 1, 1>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLSTM : AXSI4FR<"vlstm${p}\t$Rn, $regs", 0, 0>,
             Requires<[HasV8MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM];
-    let Uses = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
+    let Defs = [VPR, FPSCR, FPSCR_NZCV];
+    let Uses = [VPR, FPSCR, FPSCR_NZCV, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
 // T1: assembly does not contain the register list.
@@ -367,8 +367,8 @@ def : InstAlias<"vlstm${p}\t$Rn", (VLSTM GPRnopc:$Rn, pred:$p, 0)>,
 // The register list has no effect on the encoding, it is for assembly/disassembly purposes only.
 def VLSTM_T2 : AXSI4FR<"vlstm${p}\t$Rn, $regs", 1, 0>,
             Requires<[HasV8_1MMainline, Has8MSecExt]> {
-    let Defs = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM];
-    let Uses = [VPR, FPSCR, FPSCR_NZCV, FPSCR_RM, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
+    let Defs = [VPR, FPSCR, FPSCR_NZCV];
+    let Uses = [VPR, FPSCR, FPSCR_NZCV, D0,  D1,  D2,  D3,  D4,  D5,  D6,  D7,  D8,  D9,  D10, D11, D12, D13, D14, D15,
                                         D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31];
     let DecoderMethod = "DecodeLazyLoadStoreMul";
 }
diff --git a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
index 674ed04616fa5..9bc335c2ab09a 100644
--- a/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
+++ b/llvm/test/CodeGen/ARM/cmse-vlldm-no-reorder.mir
@@ -90,7 +90,7 @@ body:             |
 # CHECK-NEXT:  $r0 = t2BICri $r0, 1, 14 /* CC::al */, $noreg, $noreg
 # CHECK-NEXT:  $sp = tSUBspi $sp, 34, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  dead $s0 = VMOVS undef $s0, 14 /* CC::al */, $noreg
-# CHECK-NEXT:  VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $fpscr_rm, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
+# CHECK-NEXT:  VLSTM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit undef $vpr, implicit undef $fpscr, implicit undef $fpscr_nzcv, implicit undef $d0, implicit undef $d1, implicit undef $d2, implicit undef $d3, implicit undef $d4, implicit undef $d5, implicit undef $d6, implicit undef $d7, implicit $d8, implicit $d9, implicit $d10, implicit $d11, implicit $d12, implicit $d13, implicit $d14, implicit $d15
 # CHECK-NEXT:  $r1 = tMOVr $r0, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $r2 = tMOVr $r0, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $r3 = tMOVr $r0, 14 /* CC::al */, $noreg
@@ -106,7 +106,7 @@ body:             |
 # CHECK-NEXT:  t2MSR_M 3072, $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
 # CHECK-NEXT:  tBLXNSr 14 /* CC::al */, $noreg, killed $r0, csr_aapcs, implicit-def $lr, implicit $sp, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $s0
 # CHECK-NEXT:  $r12 = VMOVRS $s0, 14 /* CC::al */, $noreg
-# CHECK-NEXT:  VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $fpscr_rm, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
+# CHECK-NEXT:  VLLDM $sp, 14 /* CC::al */, $noreg, 0, implicit-def $vpr, implicit-def $fpscr, implicit-def $fpscr_nzcv, implicit-def $d0, implicit-def $d1, implicit-def $d2, implicit-def $d3, implicit-def $d4, implicit-def $d5, implicit-def $d6, implicit-def $d7, implicit-def $d8, implicit-def $d9, implicit-def $d10, implicit-def $d11, implicit-def $d12, implicit-def $d13, implicit-def $d14, implicit-def $d15
 # CHECK-NEXT:  $s0 = VMOVSR $r12, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $sp = tADDspi $sp, 34, 14 /* CC::al */, $noreg
 # CHECK-NEXT:  $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $r8, def $r9, def $r10, def $r11

>From 1c34ba97d53a06518d0c94f3130de8a42b36ca19 Mon Sep 17 00:00:00 2001
From: Erik Enikeev <evonatarius at gmail.com>
Date: Thu, 2 Oct 2025 16:44:21 +0300
Subject: [PATCH 3/3] [ARM] Process additional instructions

---
 llvm/lib/Target/ARM/ARMInstrVFP.td            | 30 +++++++++++------
 .../arm-select-copy_to_regclass-of-fptosi.mir |  2 +-
 .../test/CodeGen/ARM/GlobalISel/select-fp.mir | 32 +++++++++----------
 3 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index d4a7f06ec332d..0c3ec6039636a 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -589,7 +589,7 @@ defm VSELVS : vsel_inst<"vs", 0b01, 6>;
 
 multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
   let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
-      isUnpredicable = 1 in {
+      isUnpredicable = 1, mayRaiseFPException = 1 in {
     def H : AHbInp<0b11101, 0b00, opc,
                    (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                    NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
@@ -940,7 +940,8 @@ def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
 
 multiclass vcvt_inst<string opc, bits<2> rm,
                      SDPatternOperator node = null_frag> {
-  let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0 in {
+  let PostEncoderMethod = "", DecoderNamespace = "VFPV8", hasSideEffects = 0, 
+      mayRaiseFPException = 1 in {
     def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                     (outs SPR:$Sd), (ins HPR:$Sm),
                     NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
@@ -1061,7 +1062,9 @@ def VNEGH  : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
                   IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
                   [(set (f16 HPR:$Sd), (fneg (f16 HPR:$Sm)))]>;
 
-multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
+multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node,
+                          list<Register> uses = [], bit fpexc = 0> {
+  let Uses = uses, mayRaiseFPException = fpexc in {
   def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
                (outs HPR:$Sd), (ins HPR:$Sm),
                NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
@@ -1087,6 +1090,7 @@ multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
     let Inst{7} = op2;
     let Inst{16} = op;
   }
+  }
 
   def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
                   (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
@@ -1099,9 +1103,9 @@ multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
         Requires<[HasFPARMv8,HasDPVFP]>;
 }
 
-defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
-defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
-defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
+defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc, [], 0>;
+defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint, [FPSCR_RM], 0>;
+defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint, [FPSCR_RM], 1>;
 
 multiclass vrint_inst_anpm<string opc, bits<2> rm,
                            SDPatternOperator node = null_frag> {
@@ -1649,6 +1653,7 @@ class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
 }
 
 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
+let mayRaiseFPException = 1 in
 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                 (outs SPR:$Sd), (ins DPR:$Dm),
                                 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
@@ -1669,6 +1674,7 @@ let Predicates=[HasVFP2, HasDPVFP] in {
                (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
 }
 
+let mayRaiseFPException = 1 in
 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                  (outs SPR:$Sd), (ins SPR:$Sm),
                                  IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
@@ -1693,6 +1699,7 @@ def : VFPPat<(alignedstore32 (i32 (fp_to_sint_sat (f32 SPR:$a), i32)),
                                    addrmode5:$ptr),
              (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
 
+let mayRaiseFPException = 1 in
 def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
                                  (outs SPR:$Sd), (ins HPR:$Sm),
                                  IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
@@ -1707,6 +1714,7 @@ def : VFPNoNEONPat<(i32 (fp_to_sint (f16 HPR:$a))),
 def : VFPPat<(i32 (fp_to_sint_sat (f16 HPR:$a), i32)),
              (COPY_TO_REGCLASS (VTOSIZH (f16 HPR:$a)), GPR)>;
 
+let mayRaiseFPException = 1 in
 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
@@ -1727,6 +1735,7 @@ let Predicates=[HasVFP2, HasDPVFP] in {
                (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
 }
 
+let mayRaiseFPException = 1 in
 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                  (outs SPR:$Sd), (ins SPR:$Sm),
                                  IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
@@ -1751,6 +1760,7 @@ def : VFPPat<(alignedstore32 (i32 (fp_to_uint_sat (f32 SPR:$a), i32)),
                                    addrmode5:$ptr),
              (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
 
+let mayRaiseFPException = 1 in
 def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
                                  (outs SPR:$Sd), (ins HPR:$Sm),
                                  IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
@@ -1766,7 +1776,7 @@ def : VFPPat<(i32 (fp_to_uint_sat (f16 HPR:$a), i32)),
              (COPY_TO_REGCLASS (VTOUIZH (f16 HPR:$a)), GPR)>;
 
 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
-let Uses = [FPSCR_RM] in {
+let mayRaiseFPException = 1, Uses = [FPSCR_RM] in {
 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                 (outs SPR:$Sd), (ins DPR:$Dm),
                                 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
@@ -1865,7 +1875,7 @@ class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
   let Predicates = [HasVFP2, HasDPVFP];
 }
 
-let isUnpredicable = 1 in {
+let isUnpredicable = 1, mayRaiseFPException = 1 in {
 
 def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
                        (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
@@ -1891,8 +1901,9 @@ def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
              Requires<[HasFullFP16]>,
              Sched<[WriteFPCVT]>;
 
-} // End of 'let isUnpredicable = 1 in'
+} // End of 'let isUnpredicable = 1, mayRaiseFPException = 1 in'
 
+let mayRaiseFPException = 1 in {
 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
                        (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                  IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
@@ -1948,6 +1959,7 @@ def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
                        (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                  IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
              Sched<[WriteFPCVT]>;
+} // let mayRaiseFPException = 1
 
 // Fixed-Point to FP:
 
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-copy_to_regclass-of-fptosi.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-copy_to_regclass-of-fptosi.mir
index 45a846b5b8779..4cded131190f4 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-select-copy_to_regclass-of-fptosi.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-select-copy_to_regclass-of-fptosi.mir
@@ -19,7 +19,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_fptosi
     ; CHECK: [[COPY:%[0-9]+]]:spr = COPY $s0
-    ; CHECK: [[VTOSIZS:%[0-9]+]]:spr = VTOSIZS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK: [[VTOSIZS:%[0-9]+]]:spr = nofpexcept VTOSIZS [[COPY]], 14 /* CC::al */, $noreg
     ; CHECK: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOSIZS]]
     ; CHECK: $r0 = COPY [[COPY1]]
     ; CHECK: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir b/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
index 07a683842b59f..0dbb65d383907 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/select-fp.mir
@@ -93,7 +93,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK-NEXT: [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VADDS:%[0-9]+]]:spr = nofpexcept VADDS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VADDS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -124,7 +124,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
-    ; CHECK-NEXT: [[VADDD:%[0-9]+]]:dpr = nofpexcept VADDD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VADDD:%[0-9]+]]:dpr = nofpexcept VADDD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VADDD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -155,7 +155,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK-NEXT: [[VSUBS:%[0-9]+]]:spr = nofpexcept VSUBS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VSUBS:%[0-9]+]]:spr = nofpexcept VSUBS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VSUBS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -186,7 +186,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
-    ; CHECK-NEXT: [[VSUBD:%[0-9]+]]:dpr = nofpexcept VSUBD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VSUBD:%[0-9]+]]:dpr = nofpexcept VSUBD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VSUBD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -217,7 +217,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK-NEXT: [[VMULS:%[0-9]+]]:spr = nofpexcept VMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VMULS:%[0-9]+]]:spr = nofpexcept VMULS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VMULS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -248,7 +248,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
-    ; CHECK-NEXT: [[VMULD:%[0-9]+]]:dpr = nofpexcept VMULD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VMULD:%[0-9]+]]:dpr = nofpexcept VMULD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VMULD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -279,7 +279,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
-    ; CHECK-NEXT: [[VDIVS:%[0-9]+]]:spr = nofpexcept VDIVS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VDIVS:%[0-9]+]]:spr = nofpexcept VDIVS [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VDIVS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -310,7 +310,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
-    ; CHECK-NEXT: [[VDIVD:%[0-9]+]]:dpr = nofpexcept VDIVD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VDIVD:%[0-9]+]]:dpr = nofpexcept VDIVD [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VDIVD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -398,7 +398,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY $s1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:spr = COPY $s2
-    ; CHECK-NEXT: [[VFMAS:%[0-9]+]]:spr = nofpexcept VFMAS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VFMAS:%[0-9]+]]:spr = nofpexcept VFMAS [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VFMAS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s32) = COPY $s0
@@ -433,7 +433,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:dpr = COPY $d1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:dpr = COPY $d2
-    ; CHECK-NEXT: [[VFMAD:%[0-9]+]]:dpr = nofpexcept VFMAD [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VFMAD:%[0-9]+]]:dpr = nofpexcept VFMAD [[COPY2]], [[COPY]], [[COPY1]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VFMAD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s64) = COPY $d0
@@ -464,7 +464,7 @@ body:             |
     ; CHECK: liveins: $s0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
-    ; CHECK-NEXT: [[VCVTDS:%[0-9]+]]:dpr = nofpexcept VCVTDS [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VCVTDS:%[0-9]+]]:dpr = nofpexcept VCVTDS [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $d0 = COPY [[VCVTDS]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $d0
     %0(s32) = COPY $s0
@@ -491,7 +491,7 @@ body:             |
     ; CHECK: liveins: $d0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
-    ; CHECK-NEXT: [[VCVTSD:%[0-9]+]]:spr = nofpexcept VCVTSD [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr
+    ; CHECK-NEXT: [[VCVTSD:%[0-9]+]]:spr = nofpexcept VCVTSD [[COPY]], 14 /* CC::al */, $noreg, implicit $fpscr_rm
     ; CHECK-NEXT: $s0 = COPY [[VCVTSD]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $s0
     %0(s64) = COPY $d0
@@ -518,7 +518,7 @@ body:             |
     ; CHECK: liveins: $s0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
-    ; CHECK-NEXT: [[VTOSIZS:%[0-9]+]]:spr = VTOSIZS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[VTOSIZS:%[0-9]+]]:spr = nofpexcept VTOSIZS [[COPY]], 14 /* CC::al */, $noreg
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOSIZS]]
     ; CHECK-NEXT: $r0 = COPY [[COPY1]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
@@ -546,7 +546,7 @@ body:             |
     ; CHECK: liveins: $d0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
-    ; CHECK-NEXT: [[VTOSIZD:%[0-9]+]]:spr = VTOSIZD [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[VTOSIZD:%[0-9]+]]:spr = nofpexcept VTOSIZD [[COPY]], 14 /* CC::al */, $noreg
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOSIZD]]
     ; CHECK-NEXT: $r0 = COPY [[COPY1]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
@@ -574,7 +574,7 @@ body:             |
     ; CHECK: liveins: $s0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:spr = COPY $s0
-    ; CHECK-NEXT: [[VTOUIZS:%[0-9]+]]:spr = VTOUIZS [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[VTOUIZS:%[0-9]+]]:spr = nofpexcept VTOUIZS [[COPY]], 14 /* CC::al */, $noreg
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOUIZS]]
     ; CHECK-NEXT: $r0 = COPY [[COPY1]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0
@@ -602,7 +602,7 @@ body:             |
     ; CHECK: liveins: $d0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr = COPY $d0
-    ; CHECK-NEXT: [[VTOUIZD:%[0-9]+]]:spr = VTOUIZD [[COPY]], 14 /* CC::al */, $noreg
+    ; CHECK-NEXT: [[VTOUIZD:%[0-9]+]]:spr = nofpexcept VTOUIZD [[COPY]], 14 /* CC::al */, $noreg
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[VTOUIZD]]
     ; CHECK-NEXT: $r0 = COPY [[COPY1]]
     ; CHECK-NEXT: BX_RET 14 /* CC::al */, $noreg, implicit $r0



More information about the llvm-commits mailing list