[llvm] [X86] Add missing immediate qualifier to the (V)CMPSS/D instructions (PR #84496)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 8 07:14:50 PST 2024


https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/84496

This matches the (V)CMPPS/D naming and makes it easier to algorithmically recreate instruction names in various analysis scripts I'm working on.

From 9872cae51f75f769b6433ba0d3dae48b6e623465 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 8 Mar 2024 15:13:19 +0000
Subject: [PATCH] [X86] Add missing immediate qualifier to the (V)CMPSS/D
 instructions

This matches the (V)CMPPS/D naming and makes it easier to algorithmically recreate instruction names in various analysis scripts I'm working on.
---
 .../X86/MCTargetDesc/X86ATTInstPrinter.cpp    | 112 ++++----
 .../MCTargetDesc/X86EncodingOptimization.cpp  |   4 +-
 .../X86/MCTargetDesc/X86InstPrinterCommon.cpp |  40 +--
 .../X86/MCTargetDesc/X86IntelInstPrinter.cpp  | 112 ++++----
 llvm/lib/Target/X86/X86FastISel.cpp           |   8 +-
 llvm/lib/Target/X86/X86InstrAVX512.td         |  98 +++----
 llvm/lib/Target/X86/X86InstrInfo.cpp          |  40 +--
 llvm/lib/Target/X86/X86InstrSSE.td            |  46 ++--
 llvm/lib/Target/X86/X86SchedSapphireRapids.td |   8 +-
 .../CodeGen/X86/apx/kmov-domain-assignment.ll |   8 +-
 llvm/test/CodeGen/X86/domain-reassignment.mir | 246 +++++++++---------
 llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll    |   8 +-
 12 files changed, 365 insertions(+), 365 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
index e96f9279826beb..33104524c5a890 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
@@ -89,12 +89,12 @@ bool X86ATTInstPrinter::printVecCompareInstr(const MCInst *MI,
   // Custom print the vector compare instructions to get the immediate
   // translated into the mnemonic.
   switch (MI->getOpcode()) {
-  case X86::CMPPDrmi:    case X86::CMPPDrri:
-  case X86::CMPPSrmi:    case X86::CMPPSrri:
-  case X86::CMPSDrm:     case X86::CMPSDrr:
-  case X86::CMPSDrm_Int: case X86::CMPSDrr_Int:
-  case X86::CMPSSrm:     case X86::CMPSSrr:
-  case X86::CMPSSrm_Int: case X86::CMPSSrr_Int:
+  case X86::CMPPDrmi:     case X86::CMPPDrri:
+  case X86::CMPPSrmi:     case X86::CMPPSrri:
+  case X86::CMPSDrmi:     case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int: case X86::CMPSDrri_Int:
+  case X86::CMPSSrmi:     case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int: case X86::CMPSSrri_Int:
     if (Imm >= 0 && Imm <= 7) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/false, OS);
@@ -117,56 +117,56 @@ bool X86ATTInstPrinter::printVecCompareInstr(const MCInst *MI,
     }
     break;
 
-  case X86::VCMPPDrmi:      case X86::VCMPPDrri:
-  case X86::VCMPPDYrmi:     case X86::VCMPPDYrri:
-  case X86::VCMPPDZ128rmi:  case X86::VCMPPDZ128rri:
-  case X86::VCMPPDZ256rmi:  case X86::VCMPPDZ256rri:
-  case X86::VCMPPDZrmi:     case X86::VCMPPDZrri:
-  case X86::VCMPPSrmi:      case X86::VCMPPSrri:
-  case X86::VCMPPSYrmi:     case X86::VCMPPSYrri:
-  case X86::VCMPPSZ128rmi:  case X86::VCMPPSZ128rri:
-  case X86::VCMPPSZ256rmi:  case X86::VCMPPSZ256rri:
-  case X86::VCMPPSZrmi:     case X86::VCMPPSZrri:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPPDZ128rmik: case X86::VCMPPDZ128rrik:
-  case X86::VCMPPDZ256rmik: case X86::VCMPPDZ256rrik:
-  case X86::VCMPPDZrmik:    case X86::VCMPPDZrrik:
-  case X86::VCMPPSZ128rmik: case X86::VCMPPSZ128rrik:
-  case X86::VCMPPSZ256rmik: case X86::VCMPPSZ256rrik:
-  case X86::VCMPPSZrmik:    case X86::VCMPPSZrrik:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPPDZ128rmbi: case X86::VCMPPDZ128rmbik:
-  case X86::VCMPPDZ256rmbi: case X86::VCMPPDZ256rmbik:
-  case X86::VCMPPDZrmbi:    case X86::VCMPPDZrmbik:
-  case X86::VCMPPSZ128rmbi: case X86::VCMPPSZ128rmbik:
-  case X86::VCMPPSZ256rmbi: case X86::VCMPPSZ256rmbik:
-  case X86::VCMPPSZrmbi:    case X86::VCMPPSZrmbik:
-  case X86::VCMPPDZrrib:    case X86::VCMPPDZrribk:
-  case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
-  case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
-  case X86::VCMPPHZ256rmi:  case X86::VCMPPHZ256rri:
-  case X86::VCMPPHZrmi:     case X86::VCMPPHZrri:
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPPHZ128rmik: case X86::VCMPPHZ128rrik:
-  case X86::VCMPPHZ256rmik: case X86::VCMPPHZ256rrik:
-  case X86::VCMPPHZrmik:    case X86::VCMPPHZrrik:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
-  case X86::VCMPPHZ128rmbi: case X86::VCMPPHZ128rmbik:
-  case X86::VCMPPHZ256rmbi: case X86::VCMPPHZ256rmbik:
-  case X86::VCMPPHZrmbi:    case X86::VCMPPHZrmbik:
-  case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
+  case X86::VCMPPDrmi:       case X86::VCMPPDrri:
+  case X86::VCMPPDYrmi:      case X86::VCMPPDYrri:
+  case X86::VCMPPDZ128rmi:   case X86::VCMPPDZ128rri:
+  case X86::VCMPPDZ256rmi:   case X86::VCMPPDZ256rri:
+  case X86::VCMPPDZrmi:      case X86::VCMPPDZrri:
+  case X86::VCMPPSrmi:       case X86::VCMPPSrri:
+  case X86::VCMPPSYrmi:      case X86::VCMPPSYrri:
+  case X86::VCMPPSZ128rmi:   case X86::VCMPPSZ128rri:
+  case X86::VCMPPSZ256rmi:   case X86::VCMPPSZ256rri:
+  case X86::VCMPPSZrmi:      case X86::VCMPPSZrri:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPPDZ128rmik:  case X86::VCMPPDZ128rrik:
+  case X86::VCMPPDZ256rmik:  case X86::VCMPPDZ256rrik:
+  case X86::VCMPPDZrmik:     case X86::VCMPPDZrrik:
+  case X86::VCMPPSZ128rmik:  case X86::VCMPPSZ128rrik:
+  case X86::VCMPPSZ256rmik:  case X86::VCMPPSZ256rrik:
+  case X86::VCMPPSZrmik:     case X86::VCMPPSZrrik:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPPDZ128rmbi:  case X86::VCMPPDZ128rmbik:
+  case X86::VCMPPDZ256rmbi:  case X86::VCMPPDZ256rmbik:
+  case X86::VCMPPDZrmbi:     case X86::VCMPPDZrmbik:
+  case X86::VCMPPSZ128rmbi:  case X86::VCMPPSZ128rmbik:
+  case X86::VCMPPSZ256rmbi:  case X86::VCMPPSZ256rmbik:
+  case X86::VCMPPSZrmbi:     case X86::VCMPPSZrmbik:
+  case X86::VCMPPDZrrib:     case X86::VCMPPDZrribk:
+  case X86::VCMPPSZrrib:     case X86::VCMPPSZrribk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
+  case X86::VCMPPHZ128rmi:   case X86::VCMPPHZ128rri:
+  case X86::VCMPPHZ256rmi:   case X86::VCMPPHZ256rri:
+  case X86::VCMPPHZrmi:      case X86::VCMPPHZrri:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPPHZ128rmik:  case X86::VCMPPHZ128rrik:
+  case X86::VCMPPHZ256rmik:  case X86::VCMPPHZ256rrik:
+  case X86::VCMPPHZrmik:     case X86::VCMPPHZrrik:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
+  case X86::VCMPPHZ128rmbi:  case X86::VCMPPHZ128rmbik:
+  case X86::VCMPPHZ256rmbi:  case X86::VCMPPHZ256rmbik:
+  case X86::VCMPPHZrmbi:     case X86::VCMPPHZrmbik:
+  case X86::VCMPPHZrrib:     case X86::VCMPPHZrribk:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
     if (Imm >= 0 && Imm <= 31) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/true, OS);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index 134206466c542f..001a9d4d4d3c1e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -52,8 +52,8 @@ bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
   case X86::VCMPPDYrri:
   case X86::VCMPPSrri:
   case X86::VCMPPSYrri:
-  case X86::VCMPSDrr:
-  case X86::VCMPSSrr: {
+  case X86::VCMPSDrri:
+  case X86::VCMPSSrri: {
     switch (MI.getOperand(3).getImm() & 0x7) {
     default:
       return false;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
index fd46e4e1df821a..29a1866bf01ab0 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
@@ -272,24 +272,24 @@ void X86InstPrinterCommon::printCMPMnemonic(const MCInst *MI, bool IsVCmp,
   case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
     OS << "ps\t";
     break;
-  case X86::CMPSDrm:        case X86::CMPSDrr:
-  case X86::CMPSDrm_Int:    case X86::CMPSDrr_Int:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
+  case X86::CMPSDrmi:        case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int:    case X86::CMPSDrri_Int:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
     OS << "sd\t";
     break;
-  case X86::CMPSSrm:        case X86::CMPSSrr:
-  case X86::CMPSSrm_Int:    case X86::CMPSSrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
+  case X86::CMPSSrmi:        case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int:    case X86::CMPSSrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
     OS << "ss\t";
     break;
   case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
@@ -304,10 +304,10 @@ void X86InstPrinterCommon::printCMPMnemonic(const MCInst *MI, bool IsVCmp,
   case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
     OS << "ph\t";
     break;
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
     OS << "sh\t";
     break;
   }
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
index 0705700c78173a..7c8459a546516e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
@@ -69,12 +69,12 @@ bool X86IntelInstPrinter::printVecCompareInstr(const MCInst *MI, raw_ostream &OS
   // Custom print the vector compare instructions to get the immediate
   // translated into the mnemonic.
   switch (MI->getOpcode()) {
-  case X86::CMPPDrmi:    case X86::CMPPDrri:
-  case X86::CMPPSrmi:    case X86::CMPPSrri:
-  case X86::CMPSDrm:     case X86::CMPSDrr:
-  case X86::CMPSDrm_Int: case X86::CMPSDrr_Int:
-  case X86::CMPSSrm:     case X86::CMPSSrr:
-  case X86::CMPSSrm_Int: case X86::CMPSSrr_Int:
+  case X86::CMPPDrmi:     case X86::CMPPDrri:
+  case X86::CMPPSrmi:     case X86::CMPPSrri:
+  case X86::CMPSDrmi:     case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int: case X86::CMPSDrri_Int:
+  case X86::CMPSSrmi:     case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int: case X86::CMPSSrri_Int:
     if (Imm >= 0 && Imm <= 7) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/false, OS);
@@ -96,56 +96,56 @@ bool X86IntelInstPrinter::printVecCompareInstr(const MCInst *MI, raw_ostream &OS
     }
     break;
 
-  case X86::VCMPPDrmi:      case X86::VCMPPDrri:
-  case X86::VCMPPDYrmi:     case X86::VCMPPDYrri:
-  case X86::VCMPPDZ128rmi:  case X86::VCMPPDZ128rri:
-  case X86::VCMPPDZ256rmi:  case X86::VCMPPDZ256rri:
-  case X86::VCMPPDZrmi:     case X86::VCMPPDZrri:
-  case X86::VCMPPSrmi:      case X86::VCMPPSrri:
-  case X86::VCMPPSYrmi:     case X86::VCMPPSYrri:
-  case X86::VCMPPSZ128rmi:  case X86::VCMPPSZ128rri:
-  case X86::VCMPPSZ256rmi:  case X86::VCMPPSZ256rri:
-  case X86::VCMPPSZrmi:     case X86::VCMPPSZrri:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPPDZ128rmik: case X86::VCMPPDZ128rrik:
-  case X86::VCMPPDZ256rmik: case X86::VCMPPDZ256rrik:
-  case X86::VCMPPDZrmik:    case X86::VCMPPDZrrik:
-  case X86::VCMPPSZ128rmik: case X86::VCMPPSZ128rrik:
-  case X86::VCMPPSZ256rmik: case X86::VCMPPSZ256rrik:
-  case X86::VCMPPSZrmik:    case X86::VCMPPSZrrik:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPPDZ128rmbi: case X86::VCMPPDZ128rmbik:
-  case X86::VCMPPDZ256rmbi: case X86::VCMPPDZ256rmbik:
-  case X86::VCMPPDZrmbi:    case X86::VCMPPDZrmbik:
-  case X86::VCMPPSZ128rmbi: case X86::VCMPPSZ128rmbik:
-  case X86::VCMPPSZ256rmbi: case X86::VCMPPSZ256rmbik:
-  case X86::VCMPPSZrmbi:    case X86::VCMPPSZrmbik:
-  case X86::VCMPPDZrrib:    case X86::VCMPPDZrribk:
-  case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
-  case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
-  case X86::VCMPPHZ256rmi:  case X86::VCMPPHZ256rri:
-  case X86::VCMPPHZrmi:     case X86::VCMPPHZrri:
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPPHZ128rmik: case X86::VCMPPHZ128rrik:
-  case X86::VCMPPHZ256rmik: case X86::VCMPPHZ256rrik:
-  case X86::VCMPPHZrmik:    case X86::VCMPPHZrrik:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
-  case X86::VCMPPHZ128rmbi: case X86::VCMPPHZ128rmbik:
-  case X86::VCMPPHZ256rmbi: case X86::VCMPPHZ256rmbik:
-  case X86::VCMPPHZrmbi:    case X86::VCMPPHZrmbik:
-  case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
+  case X86::VCMPPDrmi:       case X86::VCMPPDrri:
+  case X86::VCMPPDYrmi:      case X86::VCMPPDYrri:
+  case X86::VCMPPDZ128rmi:   case X86::VCMPPDZ128rri:
+  case X86::VCMPPDZ256rmi:   case X86::VCMPPDZ256rri:
+  case X86::VCMPPDZrmi:      case X86::VCMPPDZrri:
+  case X86::VCMPPSrmi:       case X86::VCMPPSrri:
+  case X86::VCMPPSYrmi:      case X86::VCMPPSYrri:
+  case X86::VCMPPSZ128rmi:   case X86::VCMPPSZ128rri:
+  case X86::VCMPPSZ256rmi:   case X86::VCMPPSZ256rri:
+  case X86::VCMPPSZrmi:      case X86::VCMPPSZrri:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPPDZ128rmik:  case X86::VCMPPDZ128rrik:
+  case X86::VCMPPDZ256rmik:  case X86::VCMPPDZ256rrik:
+  case X86::VCMPPDZrmik:     case X86::VCMPPDZrrik:
+  case X86::VCMPPSZ128rmik:  case X86::VCMPPSZ128rrik:
+  case X86::VCMPPSZ256rmik:  case X86::VCMPPSZ256rrik:
+  case X86::VCMPPSZrmik:     case X86::VCMPPSZrrik:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPPDZ128rmbi:  case X86::VCMPPDZ128rmbik:
+  case X86::VCMPPDZ256rmbi:  case X86::VCMPPDZ256rmbik:
+  case X86::VCMPPDZrmbi:     case X86::VCMPPDZrmbik:
+  case X86::VCMPPSZ128rmbi:  case X86::VCMPPSZ128rmbik:
+  case X86::VCMPPSZ256rmbi:  case X86::VCMPPSZ256rmbik:
+  case X86::VCMPPSZrmbi:     case X86::VCMPPSZrmbik:
+  case X86::VCMPPDZrrib:     case X86::VCMPPDZrribk:
+  case X86::VCMPPSZrrib:     case X86::VCMPPSZrribk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
+  case X86::VCMPPHZ128rmi:   case X86::VCMPPHZ128rri:
+  case X86::VCMPPHZ256rmi:   case X86::VCMPPHZ256rri:
+  case X86::VCMPPHZrmi:      case X86::VCMPPHZrri:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPPHZ128rmik:  case X86::VCMPPHZ128rrik:
+  case X86::VCMPPHZ256rmik:  case X86::VCMPPHZ256rrik:
+  case X86::VCMPPHZrmik:     case X86::VCMPPHZrrik:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
+  case X86::VCMPPHZ128rmbi:  case X86::VCMPPHZ128rmbik:
+  case X86::VCMPPHZ256rmbi:  case X86::VCMPPHZ256rmbik:
+  case X86::VCMPPHZrmbi:     case X86::VCMPPHZrmbik:
+  case X86::VCMPPHZrrib:     case X86::VCMPPHZrribk:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
     if (Imm >= 0 && Imm <= 31) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/true, OS);
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 9368de62817b3d..9f0b5f32df20a0 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2198,7 +2198,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     const TargetRegisterClass *VK1 = &X86::VK1RegClass;
 
     unsigned CmpOpcode =
-      (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
+      (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri;
     Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
                                        CC);
 
@@ -2228,7 +2228,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     // instructions as the AND/ANDN/OR sequence due to register moves, so
     // don't bother.
     unsigned CmpOpcode =
-      (RetVT == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
+      (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri;
     unsigned BlendOpcode =
       (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
 
@@ -2242,8 +2242,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
   } else {
     // Choose the SSE instruction sequence based on data type (float or double).
     static const uint16_t OpcTable[2][4] = {
-      { X86::CMPSSrr,  X86::ANDPSrr,  X86::ANDNPSrr,  X86::ORPSrr  },
-      { X86::CMPSDrr,  X86::ANDPDrr,  X86::ANDNPDrr,  X86::ORPDrr  }
+      { X86::CMPSSrri,  X86::ANDPSrr,  X86::ANDNPSrr,  X86::ORPSrr  },
+      { X86::CMPSDrri,  X86::ANDPDrr,  X86::ANDNPDrr,  X86::ORPDrr  }
     };
 
     const uint16_t *Opc = nullptr;
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index a76561f092c349..43a40f5e691ea3 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1937,58 +1937,58 @@ defm VPBLENDMW : blendmask_bw<0x66, "vpblendmw", SchedWriteVarBlend,
 multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE,
                              PatFrag OpNode_su, PatFrag OpNodeSAE_su,
                              X86FoldableSchedWrite sched> {
-  defm  rr_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
-                      (outs _.KRC:$dst),
-                      (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
-                      "vcmp"#_.Suffix,
-                      "$cc, $src2, $src1", "$src1, $src2, $cc",
-                      (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc),
-                      (OpNode_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                 timm:$cc)>, EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+  defm  rri_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
+                                       (outs _.KRC:$dst),
+                                       (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
+                                       "vcmp"#_.Suffix,
+                                       "$cc, $src2, $src1", "$src1, $src2, $cc",
+                                       (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc),
+                                       (OpNode_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc)>,
+                                       EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
   let mayLoad = 1 in
-  defm  rm_Int  : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
-                    (outs _.KRC:$dst),
-                    (ins _.RC:$src1, _.IntScalarMemOp:$src2, u8imm:$cc),
-                    "vcmp"#_.Suffix,
-                    "$cc, $src2, $src1", "$src1, $src2, $cc",
-                    (OpNode (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
-                        timm:$cc),
-                    (OpNode_su (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
-                        timm:$cc)>, EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
-                    Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+  defm  rmi_Int  : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+                                       (outs _.KRC:$dst),
+                                       (ins _.RC:$src1, _.IntScalarMemOp:$src2, u8imm:$cc),
+                                       "vcmp"#_.Suffix,
+                                       "$cc, $src2, $src1", "$src1, $src2, $cc",
+                                       (OpNode (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
+                                           timm:$cc),
+                                       (OpNode_su (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
+                                           timm:$cc)>, EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
+                                       Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
 
   let Uses = [MXCSR] in
-  defm  rrb_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
-                     (outs _.KRC:$dst),
-                     (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
-                     "vcmp"#_.Suffix,
-                     "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc",
-                     (OpNodeSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                timm:$cc),
-                     (OpNodeSAE_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                   timm:$cc)>,
-                     EVEX, VVVV, VEX_LIG, EVEX_B, Sched<[sched]>;
+  defm  rrib_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
+                                        (outs _.KRC:$dst),
+                                        (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
+                                        "vcmp"#_.Suffix,
+                                        "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc",
+                                        (OpNodeSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+                                                   timm:$cc),
+                                        (OpNodeSAE_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+                                                      timm:$cc)>,
+                                        EVEX, VVVV, VEX_LIG, EVEX_B, Sched<[sched]>;
 
   let isCodeGenOnly = 1 in {
     let isCommutable = 1 in
-    def rr : AVX512Ii8<0xC2, MRMSrcReg,
-                (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, u8imm:$cc),
-                !strconcat("vcmp", _.Suffix,
-                           "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
-                [(set _.KRC:$dst, (OpNode _.FRC:$src1,
-                                          _.FRC:$src2,
-                                          timm:$cc))]>,
-                EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
-    def rm : AVX512Ii8<0xC2, MRMSrcMem,
-              (outs _.KRC:$dst),
-              (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
-              !strconcat("vcmp", _.Suffix,
-                         "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
-              [(set _.KRC:$dst, (OpNode _.FRC:$src1,
-                                        (_.ScalarLdFrag addr:$src2),
-                                        timm:$cc))]>,
-              EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
-              Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+    def rri : AVX512Ii8<0xC2, MRMSrcReg,
+                        (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, u8imm:$cc),
+                        !strconcat("vcmp", _.Suffix,
+                                   "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+                        [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+                                                  _.FRC:$src2,
+                                                  timm:$cc))]>,
+                        EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+    def rmi : AVX512Ii8<0xC2, MRMSrcMem,
+                        (outs _.KRC:$dst),
+                        (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
+                        !strconcat("vcmp", _.Suffix,
+                                   "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+                        [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+                                                  (_.ScalarLdFrag addr:$src2),
+                                                  timm:$cc))]>,
+                        EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
+                        Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
   }
 }
 
@@ -2437,15 +2437,15 @@ defm VCMPPH : avx512_vcmp<SchedWriteFCmp, avx512vl_f16_info, HasFP16>,
 // Patterns to select fp compares with load as first operand.
 let Predicates = [HasAVX512] in {
   def : Pat<(v1i1 (X86cmpms (loadf64 addr:$src2), FR64X:$src1, timm:$cc)),
-            (VCMPSDZrm FR64X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSDZrmi FR64X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 
   def : Pat<(v1i1 (X86cmpms (loadf32 addr:$src2), FR32X:$src1, timm:$cc)),
-            (VCMPSSZrm FR32X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSSZrmi FR32X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 }
 
 let Predicates = [HasFP16] in {
   def : Pat<(v1i1 (X86cmpms (loadf16 addr:$src2), FR16X:$src1, timm:$cc)),
-            (VCMPSHZrm FR16X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSHZrmi FR16X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 }
 
 // ----------------------------------------------------------------
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 3f0557e651f89b..af0ed071c29aba 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2573,11 +2573,11 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     WorkingMI->getOperand(3).setImm(
         X86::getSwappedVPCOMImm(MI.getOperand(3).getImm() & 0x7));
     break;
-  case X86::VCMPSDZrr:
-  case X86::VCMPSSZrr:
+  case X86::VCMPSDZrri:
+  case X86::VCMPSSZrri:
   case X86::VCMPPDZrri:
   case X86::VCMPPSZrri:
-  case X86::VCMPSHZrr:
+  case X86::VCMPSHZrri:
   case X86::VCMPPHZrri:
   case X86::VCMPPHZ128rri:
   case X86::VCMPPHZ256rri:
@@ -2820,21 +2820,21 @@ bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
     return false;
 
   switch (MI.getOpcode()) {
-  case X86::CMPSDrr:
-  case X86::CMPSSrr:
+  case X86::CMPSDrri:
+  case X86::CMPSSrri:
   case X86::CMPPDrri:
   case X86::CMPPSrri:
-  case X86::VCMPSDrr:
-  case X86::VCMPSSrr:
+  case X86::VCMPSDrri:
+  case X86::VCMPSSrri:
   case X86::VCMPPDrri:
   case X86::VCMPPSrri:
   case X86::VCMPPDYrri:
   case X86::VCMPPSYrri:
-  case X86::VCMPSDZrr:
-  case X86::VCMPSSZrr:
+  case X86::VCMPSDZrri:
+  case X86::VCMPSSZrri:
   case X86::VCMPPDZrri:
   case X86::VCMPPSZrri:
-  case X86::VCMPSHZrr:
+  case X86::VCMPSHZrri:
   case X86::VCMPPHZrri:
   case X86::VCMPPHZ128rri:
   case X86::VCMPPHZ256rri:
@@ -7510,9 +7510,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::ADDSSrr_Int:
     case X86::VADDSSrr_Int:
     case X86::VADDSSZrr_Int:
-    case X86::CMPSSrr_Int:
-    case X86::VCMPSSrr_Int:
-    case X86::VCMPSSZrr_Int:
+    case X86::CMPSSrri_Int:
+    case X86::VCMPSSrri_Int:
+    case X86::VCMPSSZrri_Int:
     case X86::DIVSSrr_Int:
     case X86::VDIVSSrr_Int:
     case X86::VDIVSSZrr_Int:
@@ -7533,7 +7533,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSSZrr_Int:
     case X86::VADDSSZrr_Intk:
     case X86::VADDSSZrr_Intkz:
-    case X86::VCMPSSZrr_Intk:
+    case X86::VCMPSSZrri_Intk:
     case X86::VDIVSSZrr_Intk:
     case X86::VDIVSSZrr_Intkz:
     case X86::VMAXSSZrr_Intk:
@@ -7679,9 +7679,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::ADDSDrr_Int:
     case X86::VADDSDrr_Int:
     case X86::VADDSDZrr_Int:
-    case X86::CMPSDrr_Int:
-    case X86::VCMPSDrr_Int:
-    case X86::VCMPSDZrr_Int:
+    case X86::CMPSDrri_Int:
+    case X86::VCMPSDrri_Int:
+    case X86::VCMPSDZrri_Int:
     case X86::DIVSDrr_Int:
     case X86::VDIVSDrr_Int:
     case X86::VDIVSDZrr_Int:
@@ -7702,7 +7702,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSDZrr_Int:
     case X86::VADDSDZrr_Intk:
     case X86::VADDSDZrr_Intkz:
-    case X86::VCMPSDZrr_Intk:
+    case X86::VCMPSDZrri_Intk:
     case X86::VDIVSDZrr_Intk:
     case X86::VDIVSDZrr_Intkz:
     case X86::VMAXSDZrr_Intk:
@@ -7814,7 +7814,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     // instruction isn't scalar (SH).
     switch (UserOpc) {
     case X86::VADDSHZrr_Int:
-    case X86::VCMPSHZrr_Int:
+    case X86::VCMPSHZrri_Int:
     case X86::VDIVSHZrr_Int:
     case X86::VMAXSHZrr_Int:
     case X86::VMINSHZrr_Int:
@@ -7822,7 +7822,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSHZrr_Int:
     case X86::VADDSHZrr_Intk:
     case X86::VADDSHZrr_Intkz:
-    case X86::VCMPSHZrr_Intk:
+    case X86::VCMPSHZrri_Intk:
     case X86::VDIVSHZrr_Intk:
     case X86::VDIVSHZrr_Intkz:
     case X86::VMAXSHZrr_Intk:
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 459b5b03507c70..fd20090fe0973b 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1830,29 +1830,29 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                             PatFrag ld_frag, string asm,
                             X86FoldableSchedWrite sched,
                             PatFrags mem_frags> {
-  def rr_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
-                    (ins VR128:$src1, VR128:$src2, u8imm:$cc), asm,
-                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
-                                              VR128:$src2, timm:$cc))]>,
-           Sched<[sched]>, SIMD_EXC;
+  def rri_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
+                     (ins VR128:$src1, VR128:$src2, u8imm:$cc), asm,
+                     [(set VR128:$dst, (OpNode (VT VR128:$src1),
+                                               VR128:$src2, timm:$cc))]>,
+                     Sched<[sched]>, SIMD_EXC;
   let mayLoad = 1 in
-  def rm_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
-                    (ins VR128:$src1, memop:$src2, u8imm:$cc), asm,
-                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
-                                              (mem_frags addr:$src2), timm:$cc))]>,
-           Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+  def rmi_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
+                     (ins VR128:$src1, memop:$src2, u8imm:$cc), asm,
+                     [(set VR128:$dst, (OpNode (VT VR128:$src1),
+                                               (mem_frags addr:$src2), timm:$cc))]>,
+                     Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
 
   let isCodeGenOnly = 1 in {
     let isCommutable = 1 in
-    def rr : SIi8<0xC2, MRMSrcReg,
-                  (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
-                  [(set RC:$dst, (OpNode RC:$src1, RC:$src2, timm:$cc))]>,
-                  Sched<[sched]>, SIMD_EXC;
-    def rm : SIi8<0xC2, MRMSrcMem,
-                  (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
-                  [(set RC:$dst, (OpNode RC:$src1,
-                                         (ld_frag addr:$src2), timm:$cc))]>,
-                  Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+    def rri : SIi8<0xC2, MRMSrcReg,
+                   (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
+                   [(set RC:$dst, (OpNode RC:$src1, RC:$src2, timm:$cc))]>,
+                   Sched<[sched]>, SIMD_EXC;
+    def rmi : SIi8<0xC2, MRMSrcMem,
+                   (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
+                   [(set RC:$dst, (OpNode RC:$src1,
+                                          (ld_frag addr:$src2), timm:$cc))]>,
+                   Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
   }
 }
 
@@ -2023,11 +2023,11 @@ let Predicates = [HasAVX] in {
 
   def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                           CommutableCMPCC:$cc)),
-            (VCMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;
+            (VCMPSDrmi FR64:$src1, addr:$src2, timm:$cc)>;
 
   def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                           CommutableCMPCC:$cc)),
-            (VCMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
+            (VCMPSSrmi FR32:$src1, addr:$src2, timm:$cc)>;
 }
 
 let Predicates = [UseSSE2] in {
@@ -2037,7 +2037,7 @@ let Predicates = [UseSSE2] in {
 
   def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                           CommutableCMPCC:$cc)),
-            (CMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;
+            (CMPSDrmi FR64:$src1, addr:$src2, timm:$cc)>;
 }
 
 let Predicates = [UseSSE1] in {
@@ -2047,7 +2047,7 @@ let Predicates = [UseSSE1] in {
 
   def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                           CommutableCMPCC:$cc)),
-            (CMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
+            (CMPSSrmi FR32:$src1, addr:$src2, timm:$cc)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index bf9e4b7dc6d9ae..78c5994ee96470 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -663,8 +663,8 @@ def : InstRW<[SPRWriteResGroup12], (instregex "^ADD_F(P?)rST0$",
                                               "^SUB(R?)_FST0r$",
                                               "^VALIGN(D|Q)Z256rri((k|kz)?)$",
                                               "^VCMPP(D|H|S)Z(128|256)rri(k?)$",
-                                              "^VCMPS(D|H|S)Zrr$",
-                                              "^VCMPS(D|H|S)Zrr(b?)_Int(k?)$",
+                                              "^VCMPS(D|H|S)Zrri$",
+                                              "^VCMPS(D|H|S)Zrr(b?)i_Int(k?)$",
                                               "^VFPCLASSP(D|H|S)Z(128|256)rr(k?)$",
                                               "^VFPCLASSS(D|H|S)Zrr(k?)$",
                                               "^VPACK(S|U)S(DW|WB)Yrr$",
@@ -2739,8 +2739,8 @@ def : InstRW<[SPRWriteResGroup263, ReadAfterVecYLd], (instregex "^VCMPP(D|H|S)Z(
                                                                 "^VPCMPUDZ((256)?)rmib(k?)$",
                                                                 "^VPTEST(N?)M(B|D|Q|W)Z((256)?)rm(k?)$",
                                                                 "^VPTEST(N?)M(D|Q)Z((256)?)rmb(k?)$")>;
-def : InstRW<[SPRWriteResGroup263, ReadAfterVecLd], (instregex "^VCMPS(D|H|S)Zrm$",
-                                                               "^VCMPS(D|H|S)Zrm_Int(k?)$",
+def : InstRW<[SPRWriteResGroup263, ReadAfterVecLd], (instregex "^VCMPS(D|H|S)Zrmi$",
+                                                               "^VCMPS(D|H|S)Zrmi_Int(k?)$",
                                                                "^VFPCLASSS(D|H|S)Zrmk$")>;
 
 def SPRWriteResGroup264 : SchedWriteRes<[SPRPort00, SPRPort02_03_11]> {
diff --git a/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll b/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
index b09a14cee95742..e70e5ff80d9592 100644
--- a/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
+++ b/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
@@ -21,8 +21,8 @@ define void @test_fcmp_storei1(i1 %cond, ptr %fptr, ptr %iptr, float %f1, float
   ; CHECK-NEXT: bb.1.if:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[VCMPSSZrr:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrr [[COPY3]], [[COPY2]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vk16 = COPY [[VCMPSSZrr]]
+  ; CHECK-NEXT:   [[VCMPSSZrri:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrri [[COPY3]], [[COPY2]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vk16 = COPY [[VCMPSSZrri]]
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:vk32 = COPY [[COPY7]]
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:vk8 = COPY [[COPY8]]
   ; CHECK-NEXT:   JMP_1 %bb.3
@@ -30,8 +30,8 @@ define void @test_fcmp_storei1(i1 %cond, ptr %fptr, ptr %iptr, float %f1, float
   ; CHECK-NEXT: bb.2.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[VCMPSSZrr1:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrr [[COPY1]], [[COPY]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:vk16 = COPY [[VCMPSSZrr1]]
+  ; CHECK-NEXT:   [[VCMPSSZrri1:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrri [[COPY1]], [[COPY]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:vk16 = COPY [[VCMPSSZrri1]]
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:vk32 = COPY [[COPY10]]
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
   ; CHECK-NEXT: {{  $}}
diff --git a/llvm/test/CodeGen/X86/domain-reassignment.mir b/llvm/test/CodeGen/X86/domain-reassignment.mir
index 8b2fbe04d14afb..8ef57574dfc301 100644
--- a/llvm/test/CodeGen/X86/domain-reassignment.mir
+++ b/llvm/test/CodeGen/X86/domain-reassignment.mir
@@ -133,14 +133,14 @@ body:             |
   ; CHECK:   JMP_1 %bb.1
   ; CHECK: bb.1.if:
   ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[VCMPSSZrr:%[0-9]+]]:vk1 = VCMPSSZrr [[COPY3]], [[COPY2]], 0
-  ; CHECK:   [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrr]]
+  ; CHECK:   [[VCMPSSZrri:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY3]], [[COPY2]], 0
+  ; CHECK:   [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri]]
   ; CHECK:   [[COPY10:%[0-9]+]]:vk8 = COPY [[COPY9]]
   ; CHECK:   JMP_1 %bb.3
   ; CHECK: bb.2.else:
   ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[VCMPSSZrr1:%[0-9]+]]:vk1 = VCMPSSZrr [[COPY1]], [[COPY]], 0
-  ; CHECK:   [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrr1]]
+  ; CHECK:   [[VCMPSSZrri1:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY1]], [[COPY]], 0
+  ; CHECK:   [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri1]]
   ; CHECK:   [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
   ; CHECK: bb.3.exit:
   ; CHECK:   [[PHI:%[0-9]+]]:vk8 = PHI [[COPY12]], %bb.2, [[COPY10]], %bb.1
@@ -173,7 +173,7 @@ body:             |
   bb.1.if:
     successors: %bb.3(0x80000000)
 
-    %14 = VCMPSSZrr %7, %8, 0, implicit $mxcsr
+    %14 = VCMPSSZrri %7, %8, 0, implicit $mxcsr
 
     ; check that cross domain copies are replaced with same domain copies.
 
@@ -183,7 +183,7 @@ body:             |
 
   bb.2.else:
     successors: %bb.3(0x80000000)
-    %12 = VCMPSSZrr %9, %10, 0, implicit $mxcsr
+    %12 = VCMPSSZrri %9, %10, 0, implicit $mxcsr
 
     ; check that cross domain copies are replaced with same domain copies.
 
@@ -316,10 +316,6 @@ body:             |
     %11 = VMOVAPDZrrk %2, killed %10, %1
     VMOVAPDZmr %0, 1, $noreg, 0, $noreg, killed %11
 
-    ; FIXME We can't replace TEST with KTEST due to flag differences
-    ; TEST8rr %18, %18, implicit-def $eflags
-    ; JCC_1 %bb.1, 4, implicit $eflags
-    ; JMP_1 %bb.2
 
   bb.1:
 
@@ -383,31 +379,34 @@ constants:
 body:             |
   ; CHECK-LABEL: name: test_16bitops
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
-  ; CHECK:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-  ; CHECK:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-  ; CHECK:   [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
-  ; CHECK:   [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
-  ; CHECK:   [[VCMPPSZrri:%[0-9]+]]:vk16 = VCMPPSZrri [[COPY3]], [[COPY4]], 0
-  ; CHECK:   [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPSZrri]]
-  ; CHECK:   [[COPY6:%[0-9]+]]:vk16 = COPY [[COPY5]]
-  ; CHECK:   [[KSHIFTRWri:%[0-9]+]]:vk16 = KSHIFTRWri [[COPY6]], 2
-  ; CHECK:   [[KSHIFTLWri:%[0-9]+]]:vk16 = KSHIFTLWri [[KSHIFTRWri]], 1
-  ; CHECK:   [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[KSHIFTLWri]]
-  ; CHECK:   [[KORWrr:%[0-9]+]]:vk16 = KORWrr [[KNOTWrr]], [[KSHIFTRWri]]
-  ; CHECK:   [[KANDWrr:%[0-9]+]]:vk16 = KANDWrr [[KORWrr]], [[KSHIFTLWri]]
-  ; CHECK:   [[KXORWrr:%[0-9]+]]:vk16 = KXORWrr [[KANDWrr]], [[KSHIFTRWri]]
-  ; CHECK:   [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
-  ; CHECK:   [[COPY7:%[0-9]+]]:vk32 = COPY [[KXORWrr]]
-  ; CHECK:   [[COPY8:%[0-9]+]]:vk16wm = COPY [[COPY7]]
-  ; CHECK:   [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
-  ; CHECK:   VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK: bb.2:
-  ; CHECK:   RET 0
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr512 = COPY $zmm2
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr512 = COPY $zmm3
+  ; CHECK-NEXT:   [[VCMPPSZrri:%[0-9]+]]:vk16 = VCMPPSZrri [[COPY3]], [[COPY4]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vk32 = COPY [[VCMPPSZrri]]
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:vk16 = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[KSHIFTRWri:%[0-9]+]]:vk16 = KSHIFTRWri [[COPY6]], 2
+  ; CHECK-NEXT:   [[KSHIFTLWri:%[0-9]+]]:vk16 = KSHIFTLWri [[KSHIFTRWri]], 1
+  ; CHECK-NEXT:   [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[KSHIFTLWri]]
+  ; CHECK-NEXT:   [[KORWrr:%[0-9]+]]:vk16 = KORWrr [[KNOTWrr]], [[KSHIFTRWri]]
+  ; CHECK-NEXT:   [[KANDWrr:%[0-9]+]]:vk16 = KANDWrr [[KORWrr]], [[KSHIFTLWri]]
+  ; CHECK-NEXT:   [[KXORWrr:%[0-9]+]]:vk16 = KXORWrr [[KANDWrr]], [[KSHIFTRWri]]
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vk32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vk32 = COPY [[KXORWrr]]
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:vk16wm = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY8]], [[COPY1]]
+  ; CHECK-NEXT:   VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   RET 0
   bb.0:
     liveins: $rdi, $zmm0, $zmm1, $zmm2, $zmm3
 
@@ -434,10 +433,6 @@ body:             |
     %11 = VMOVAPSZrrk %2, killed %10, %1
     VMOVAPSZmr %0, 1, $noreg, 0, $noreg, killed %11
 
-    ; FIXME We can't replace TEST with KTEST due to flag differences
-    ; FIXME TEST16rr %17, %17, implicit-def $eflags
-    ; FIXME JCC_1 %bb.1, 4, implicit $eflags
-    ; FIXME JMP_1 %bb.2
 
   bb.1:
 
@@ -495,27 +490,30 @@ constants:
 body:             |
   ; CHECK-LABEL: name: test_32bitops
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $rdi, $zmm0, $zmm1
-  ; CHECK:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-  ; CHECK:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-  ; CHECK:   [[KMOVDkm:%[0-9]+]]:vk32 = KMOVDkm [[COPY]], 1, $noreg, 0, $noreg
-  ; CHECK:   [[KSHIFTRDri:%[0-9]+]]:vk32 = KSHIFTRDri [[KMOVDkm]], 2
-  ; CHECK:   [[KSHIFTLDri:%[0-9]+]]:vk32 = KSHIFTLDri [[KSHIFTRDri]], 1
-  ; CHECK:   [[KNOTDrr:%[0-9]+]]:vk32 = KNOTDrr [[KSHIFTLDri]]
-  ; CHECK:   [[KORDrr:%[0-9]+]]:vk32 = KORDrr [[KNOTDrr]], [[KSHIFTRDri]]
-  ; CHECK:   [[KANDDrr:%[0-9]+]]:vk32 = KANDDrr [[KORDrr]], [[KSHIFTLDri]]
-  ; CHECK:   [[KXORDrr:%[0-9]+]]:vk32 = KXORDrr [[KANDDrr]], [[KSHIFTRDri]]
-  ; CHECK:   [[KANDNDrr:%[0-9]+]]:vk32 = KANDNDrr [[KXORDrr]], [[KORDrr]]
-  ; CHECK:   [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[KANDNDrr]], [[KXORDrr]]
-  ; CHECK:   [[COPY3:%[0-9]+]]:vk32wm = COPY [[KADDDrr]]
-  ; CHECK:   [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
-  ; CHECK:   VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK: bb.2:
-  ; CHECK:   RET 0
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $rdi, $zmm0, $zmm1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+  ; CHECK-NEXT:   [[KMOVDkm:%[0-9]+]]:vk32 = KMOVDkm [[COPY]], 1, $noreg, 0, $noreg
+  ; CHECK-NEXT:   [[KSHIFTRDri:%[0-9]+]]:vk32 = KSHIFTRDri [[KMOVDkm]], 2
+  ; CHECK-NEXT:   [[KSHIFTLDri:%[0-9]+]]:vk32 = KSHIFTLDri [[KSHIFTRDri]], 1
+  ; CHECK-NEXT:   [[KNOTDrr:%[0-9]+]]:vk32 = KNOTDrr [[KSHIFTLDri]]
+  ; CHECK-NEXT:   [[KORDrr:%[0-9]+]]:vk32 = KORDrr [[KNOTDrr]], [[KSHIFTRDri]]
+  ; CHECK-NEXT:   [[KANDDrr:%[0-9]+]]:vk32 = KANDDrr [[KORDrr]], [[KSHIFTLDri]]
+  ; CHECK-NEXT:   [[KXORDrr:%[0-9]+]]:vk32 = KXORDrr [[KANDDrr]], [[KSHIFTRDri]]
+  ; CHECK-NEXT:   [[KANDNDrr:%[0-9]+]]:vk32 = KANDNDrr [[KXORDrr]], [[KORDrr]]
+  ; CHECK-NEXT:   [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[KANDNDrr]], [[KXORDrr]]
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vk32wm = COPY [[KADDDrr]]
+  ; CHECK-NEXT:   [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+  ; CHECK-NEXT:   VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   RET 0
   bb.0:
     liveins: $rdi, $zmm0, $zmm1
 
@@ -537,10 +535,6 @@ body:             |
     %4 = VMOVDQU16Zrrk %2, killed %3, %1
     VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
 
-    ; FIXME We can't replace TEST with KTEST due to flag differences
-    ; FIXME TEST32rr %13, %13, implicit-def $eflags
-    ; FIXME JCC_1 %bb.1, 4, implicit $eflags
-    ; FIXME JMP_1 %bb.2
 
   bb.1:
 
@@ -598,27 +592,30 @@ constants:
 body:             |
   ; CHECK-LABEL: name: test_64bitops
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $rdi, $zmm0, $zmm1
-  ; CHECK:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-  ; CHECK:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-  ; CHECK:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-  ; CHECK:   [[KMOVQkm:%[0-9]+]]:vk64 = KMOVQkm [[COPY]], 1, $noreg, 0, $noreg
-  ; CHECK:   [[KSHIFTRQri:%[0-9]+]]:vk64 = KSHIFTRQri [[KMOVQkm]], 2
-  ; CHECK:   [[KSHIFTLQri:%[0-9]+]]:vk64 = KSHIFTLQri [[KSHIFTRQri]], 1
-  ; CHECK:   [[KNOTQrr:%[0-9]+]]:vk64 = KNOTQrr [[KSHIFTLQri]]
-  ; CHECK:   [[KORQrr:%[0-9]+]]:vk64 = KORQrr [[KNOTQrr]], [[KSHIFTRQri]]
-  ; CHECK:   [[KANDQrr:%[0-9]+]]:vk64 = KANDQrr [[KORQrr]], [[KSHIFTLQri]]
-  ; CHECK:   [[KXORQrr:%[0-9]+]]:vk64 = KXORQrr [[KANDQrr]], [[KSHIFTRQri]]
-  ; CHECK:   [[KANDNQrr:%[0-9]+]]:vk64 = KANDNQrr [[KXORQrr]], [[KORQrr]]
-  ; CHECK:   [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[KANDNQrr]], [[KXORQrr]]
-  ; CHECK:   [[COPY3:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
-  ; CHECK:   [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
-  ; CHECK:   VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK: bb.2:
-  ; CHECK:   RET 0
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $rdi, $zmm0, $zmm1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+  ; CHECK-NEXT:   [[KMOVQkm:%[0-9]+]]:vk64 = KMOVQkm [[COPY]], 1, $noreg, 0, $noreg
+  ; CHECK-NEXT:   [[KSHIFTRQri:%[0-9]+]]:vk64 = KSHIFTRQri [[KMOVQkm]], 2
+  ; CHECK-NEXT:   [[KSHIFTLQri:%[0-9]+]]:vk64 = KSHIFTLQri [[KSHIFTRQri]], 1
+  ; CHECK-NEXT:   [[KNOTQrr:%[0-9]+]]:vk64 = KNOTQrr [[KSHIFTLQri]]
+  ; CHECK-NEXT:   [[KORQrr:%[0-9]+]]:vk64 = KORQrr [[KNOTQrr]], [[KSHIFTRQri]]
+  ; CHECK-NEXT:   [[KANDQrr:%[0-9]+]]:vk64 = KANDQrr [[KORQrr]], [[KSHIFTLQri]]
+  ; CHECK-NEXT:   [[KXORQrr:%[0-9]+]]:vk64 = KXORQrr [[KANDQrr]], [[KSHIFTRQri]]
+  ; CHECK-NEXT:   [[KANDNQrr:%[0-9]+]]:vk64 = KANDNQrr [[KXORQrr]], [[KORQrr]]
+  ; CHECK-NEXT:   [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[KANDNQrr]], [[KXORQrr]]
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+  ; CHECK-NEXT:   [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY3]], [[COPY1]]
+  ; CHECK-NEXT:   VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   RET 0
   bb.0:
     liveins: $rdi, $zmm0, $zmm1
 
@@ -640,10 +637,6 @@ body:             |
     %4 = VMOVDQU8Zrrk %2, killed %3, %1
     VMOVDQA32Zmr %0, 1, $noreg, 0, $noreg, killed %4
 
-    ; FIXME We can't replace TEST with KTEST due to flag differences
-    ; FIXME TEST64rr %13, %13, implicit-def $eflags
-    ; FIXME JCC_1 %bb.1, 4, implicit $eflags
-    ; FIXME JMP_1 %bb.2
 
   bb.1:
 
@@ -697,16 +690,17 @@ body:             |
 
     ; CHECK-LABEL: name: test_16bitext
     ; CHECK: liveins: $rdi, $zmm0, $zmm1
-    ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-    ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-    ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
-    ; CHECK: [[COPY3:%[0-9]+]]:vk16 = COPY [[KMOVBkm]]
-    ; CHECK: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[COPY3]]
-    ; CHECK: [[COPY4:%[0-9]+]]:vk16wm = COPY [[KNOTWrr]]
-    ; CHECK: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY4]], [[COPY1]]
-    ; CHECK: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
-    ; CHECK: RET 0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+    ; CHECK-NEXT: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vk16 = COPY [[KMOVBkm]]
+    ; CHECK-NEXT: [[KNOTWrr:%[0-9]+]]:vk16 = KNOTWrr [[COPY3]]
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vk16wm = COPY [[KNOTWrr]]
+    ; CHECK-NEXT: [[VMOVAPSZrrk:%[0-9]+]]:vr512 = VMOVAPSZrrk [[COPY2]], killed [[COPY4]], [[COPY1]]
+    ; CHECK-NEXT: VMOVAPSZmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVAPSZrrk]]
+    ; CHECK-NEXT: RET 0
     %0 = COPY $rdi
     %1 = COPY $zmm0
     %2 = COPY $zmm1
@@ -767,18 +761,19 @@ body:             |
 
     ; CHECK-LABEL: name: test_32bitext
     ; CHECK: liveins: $rdi, $zmm0, $zmm1
-    ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-    ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-    ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
-    ; CHECK: [[COPY3:%[0-9]+]]:vk32 = COPY [[KMOVBkm]]
-    ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
-    ; CHECK: [[COPY4:%[0-9]+]]:vk32 = COPY [[KMOVWkm]]
-    ; CHECK: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[COPY3]], [[COPY4]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDDrr]]
-    ; CHECK: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
-    ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
-    ; CHECK: RET 0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+    ; CHECK-NEXT: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vk32 = COPY [[KMOVBkm]]
+    ; CHECK-NEXT: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vk32 = COPY [[KMOVWkm]]
+    ; CHECK-NEXT: [[KADDDrr:%[0-9]+]]:vk32 = KADDDrr [[COPY3]], [[COPY4]]
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDDrr]]
+    ; CHECK-NEXT: [[VMOVDQU16Zrrk:%[0-9]+]]:vr512 = VMOVDQU16Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+    ; CHECK-NEXT: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU16Zrrk]]
+    ; CHECK-NEXT: RET 0
     %0 = COPY $rdi
     %1 = COPY $zmm0
     %2 = COPY $zmm1
@@ -840,18 +835,19 @@ body:             |
 
     ; CHECK-LABEL: name: test_64bitext
     ; CHECK: liveins: $rdi, $zmm0, $zmm1
-    ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
-    ; CHECK: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
-    ; CHECK: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
-    ; CHECK: [[COPY3:%[0-9]+]]:vk64 = COPY [[KMOVBkm]]
-    ; CHECK: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
-    ; CHECK: [[COPY4:%[0-9]+]]:vk64 = COPY [[KMOVWkm]]
-    ; CHECK: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[COPY3]], [[COPY4]]
-    ; CHECK: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
-    ; CHECK: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
-    ; CHECK: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
-    ; CHECK: RET 0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr512 = COPY $zmm1
+    ; CHECK-NEXT: [[KMOVBkm:%[0-9]+]]:vk8 = KMOVBkm [[COPY]], 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vk64 = COPY [[KMOVBkm]]
+    ; CHECK-NEXT: [[KMOVWkm:%[0-9]+]]:vk16 = KMOVWkm [[COPY]], 1, $noreg, 0, $noreg
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vk64 = COPY [[KMOVWkm]]
+    ; CHECK-NEXT: [[KADDQrr:%[0-9]+]]:vk64 = KADDQrr [[COPY3]], [[COPY4]]
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vk64wm = COPY [[KADDQrr]]
+    ; CHECK-NEXT: [[VMOVDQU8Zrrk:%[0-9]+]]:vr512 = VMOVDQU8Zrrk [[COPY2]], killed [[COPY5]], [[COPY1]]
+    ; CHECK-NEXT: VMOVDQA32Zmr [[COPY]], 1, $noreg, 0, $noreg, killed [[VMOVDQU8Zrrk]]
+    ; CHECK-NEXT: RET 0
     %0 = COPY $rdi
     %1 = COPY $zmm0
     %2 = COPY $zmm1
@@ -924,6 +920,10 @@ body:             |
   bb.1 (%ir-block.1):
     liveins: $rdi
 
+    ; CHECK-LABEL: name: test_unused
+    ; CHECK: liveins: $rdi
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: RET 0
     RET 0
 
 ...
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index 8a7fea78702d89..2c7da100344b76 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -40,8 +40,8 @@ define float @sqrt_ieee_ninf(float %f) #0 {
   ; CHECK-NEXT:   [[VPBROADCASTDrm:%[0-9]+]]:vr128 = VPBROADCASTDrm $rip, 1, $noreg, %const.2, $noreg :: (load (s32) from constant-pool)
   ; CHECK-NEXT:   [[VPANDrr:%[0-9]+]]:vr128 = VPANDrr killed [[COPY2]], killed [[VPBROADCASTDrm]]
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDrr]]
-  ; CHECK-NEXT:   [[VCMPSSrm:%[0-9]+]]:fr32 = nofpexcept VCMPSSrm killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr128 = COPY [[VCMPSSrm]]
+  ; CHECK-NEXT:   [[VCMPSSrmi:%[0-9]+]]:fr32 = nofpexcept VCMPSSrmi killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr128 = COPY [[VCMPSSrmi]]
   ; CHECK-NEXT:   [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY4]], killed [[COPY1]]
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
   ; CHECK-NEXT:   $xmm0 = COPY [[COPY5]]
@@ -84,8 +84,8 @@ define float @sqrt_daz_ninf(float %f) #1 {
   ; CHECK-NEXT:   [[VMULSSrr5:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr128 = COPY [[VMULSSrr5]]
   ; CHECK-NEXT:   [[FsFLD0SS:%[0-9]+]]:fr32 = FsFLD0SS
-  ; CHECK-NEXT:   [[VCMPSSrr:%[0-9]+]]:fr32 = nofpexcept VCMPSSrr [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr128 = COPY [[VCMPSSrr]]
+  ; CHECK-NEXT:   [[VCMPSSrri:%[0-9]+]]:fr32 = nofpexcept VCMPSSrri [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr128 = COPY [[VCMPSSrri]]
   ; CHECK-NEXT:   [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY2]], killed [[COPY1]]
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
   ; CHECK-NEXT:   $xmm0 = COPY [[COPY3]]
