[llvm] 92d7aca - [X86] Add missing immediate qualifier to the (V)CMPSS/D instructions (#84496)

via llvm-commits <llvm-commits at lists.llvm.org>
Sat Mar 9 08:21:29 PST 2024


Author: Simon Pilgrim
Date: 2024-03-09T16:21:25Z
New Revision: 92d7aca441e09c85ab9355f99f93f3dbc35924a0

URL: https://github.com/llvm/llvm-project/commit/92d7aca441e09c85ab9355f99f93f3dbc35924a0
DIFF: https://github.com/llvm/llvm-project/commit/92d7aca441e09c85ab9355f99f93f3dbc35924a0.diff

LOG: [X86] Add missing immediate qualifier to the (V)CMPSS/D instructions (#84496)

Matches (V)CMPPS/D and makes it easier to algorithmically recreate the instruction name in various analysis scripts I'm working on.
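For reference, here is a hypothetical sketch (not part of this patch) of how such an analysis script might decompose the renamed opcode names, assuming the usual X86 suffix convention (r = register, m = memory, i = immediate, b = broadcast/SAE, k = mask, _Int = intrinsic form). Before this change, names like VCMPSDrr omitted the trailing 'i' even though the instruction takes an immediate, so a uniform pattern like this would not match them:

    import re

    # Hypothetical sketch: split a (V)CMP opcode name into its parts.
    # Assumes the suffix convention r=register, m=memory, i=immediate,
    # b=broadcast/SAE, k=mask, _Int=intrinsic form (optional trailing
    # k for masked variants).
    NAME_RE = re.compile(
        r"^(?P<mnemonic>V?CMP[PS][SDH])"         # e.g. CMPSS, VCMPPD, VCMPSH
        r"(?P<vl>Y|Z(?:128|256)?)?"              # VEX.256 (Y) or EVEX length
        r"(?P<ops>r(?:r|m)(?:b)?i(?:b)?(?:k)?)"  # operand-form suffix
        r"(?P<int>_Intk?)?$"
    )

    def split_name(name):
        m = NAME_RE.match(name)
        return m.groupdict() if m else None

    # With the rename, immediate forms parse uniformly:
    assert split_name("VCMPSSZrmi_Intk") == {
        "mnemonic": "VCMPSS", "vl": "Z", "ops": "rmi", "int": "_Intk"}
    # The pre-rename VCMPSDrr (no immediate qualifier) would return None.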

Added: 
    

Modified: 
    llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
    llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
    llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
    llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
    llvm/lib/Target/X86/X86FastISel.cpp
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrInfo.cpp
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/lib/Target/X86/X86SchedSapphireRapids.td
    llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
    llvm/test/CodeGen/X86/domain-reassignment.mir
    llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
    llvm/test/TableGen/x86-fold-tables.inc

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
index e96f9279826beb..33104524c5a890 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
@@ -89,12 +89,12 @@ bool X86ATTInstPrinter::printVecCompareInstr(const MCInst *MI,
   // Custom print the vector compare instructions to get the immediate
   // translated into the mnemonic.
   switch (MI->getOpcode()) {
-  case X86::CMPPDrmi:    case X86::CMPPDrri:
-  case X86::CMPPSrmi:    case X86::CMPPSrri:
-  case X86::CMPSDrm:     case X86::CMPSDrr:
-  case X86::CMPSDrm_Int: case X86::CMPSDrr_Int:
-  case X86::CMPSSrm:     case X86::CMPSSrr:
-  case X86::CMPSSrm_Int: case X86::CMPSSrr_Int:
+  case X86::CMPPDrmi:     case X86::CMPPDrri:
+  case X86::CMPPSrmi:     case X86::CMPPSrri:
+  case X86::CMPSDrmi:     case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int: case X86::CMPSDrri_Int:
+  case X86::CMPSSrmi:     case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int: case X86::CMPSSrri_Int:
     if (Imm >= 0 && Imm <= 7) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/false, OS);
@@ -117,56 +117,56 @@ bool X86ATTInstPrinter::printVecCompareInstr(const MCInst *MI,
     }
     break;
 
-  case X86::VCMPPDrmi:      case X86::VCMPPDrri:
-  case X86::VCMPPDYrmi:     case X86::VCMPPDYrri:
-  case X86::VCMPPDZ128rmi:  case X86::VCMPPDZ128rri:
-  case X86::VCMPPDZ256rmi:  case X86::VCMPPDZ256rri:
-  case X86::VCMPPDZrmi:     case X86::VCMPPDZrri:
-  case X86::VCMPPSrmi:      case X86::VCMPPSrri:
-  case X86::VCMPPSYrmi:     case X86::VCMPPSYrri:
-  case X86::VCMPPSZ128rmi:  case X86::VCMPPSZ128rri:
-  case X86::VCMPPSZ256rmi:  case X86::VCMPPSZ256rri:
-  case X86::VCMPPSZrmi:     case X86::VCMPPSZrri:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPPDZ128rmik: case X86::VCMPPDZ128rrik:
-  case X86::VCMPPDZ256rmik: case X86::VCMPPDZ256rrik:
-  case X86::VCMPPDZrmik:    case X86::VCMPPDZrrik:
-  case X86::VCMPPSZ128rmik: case X86::VCMPPSZ128rrik:
-  case X86::VCMPPSZ256rmik: case X86::VCMPPSZ256rrik:
-  case X86::VCMPPSZrmik:    case X86::VCMPPSZrrik:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPPDZ128rmbi: case X86::VCMPPDZ128rmbik:
-  case X86::VCMPPDZ256rmbi: case X86::VCMPPDZ256rmbik:
-  case X86::VCMPPDZrmbi:    case X86::VCMPPDZrmbik:
-  case X86::VCMPPSZ128rmbi: case X86::VCMPPSZ128rmbik:
-  case X86::VCMPPSZ256rmbi: case X86::VCMPPSZ256rmbik:
-  case X86::VCMPPSZrmbi:    case X86::VCMPPSZrmbik:
-  case X86::VCMPPDZrrib:    case X86::VCMPPDZrribk:
-  case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
-  case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
-  case X86::VCMPPHZ256rmi:  case X86::VCMPPHZ256rri:
-  case X86::VCMPPHZrmi:     case X86::VCMPPHZrri:
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPPHZ128rmik: case X86::VCMPPHZ128rrik:
-  case X86::VCMPPHZ256rmik: case X86::VCMPPHZ256rrik:
-  case X86::VCMPPHZrmik:    case X86::VCMPPHZrrik:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
-  case X86::VCMPPHZ128rmbi: case X86::VCMPPHZ128rmbik:
-  case X86::VCMPPHZ256rmbi: case X86::VCMPPHZ256rmbik:
-  case X86::VCMPPHZrmbi:    case X86::VCMPPHZrmbik:
-  case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
+  case X86::VCMPPDrmi:       case X86::VCMPPDrri:
+  case X86::VCMPPDYrmi:      case X86::VCMPPDYrri:
+  case X86::VCMPPDZ128rmi:   case X86::VCMPPDZ128rri:
+  case X86::VCMPPDZ256rmi:   case X86::VCMPPDZ256rri:
+  case X86::VCMPPDZrmi:      case X86::VCMPPDZrri:
+  case X86::VCMPPSrmi:       case X86::VCMPPSrri:
+  case X86::VCMPPSYrmi:      case X86::VCMPPSYrri:
+  case X86::VCMPPSZ128rmi:   case X86::VCMPPSZ128rri:
+  case X86::VCMPPSZ256rmi:   case X86::VCMPPSZ256rri:
+  case X86::VCMPPSZrmi:      case X86::VCMPPSZrri:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPPDZ128rmik:  case X86::VCMPPDZ128rrik:
+  case X86::VCMPPDZ256rmik:  case X86::VCMPPDZ256rrik:
+  case X86::VCMPPDZrmik:     case X86::VCMPPDZrrik:
+  case X86::VCMPPSZ128rmik:  case X86::VCMPPSZ128rrik:
+  case X86::VCMPPSZ256rmik:  case X86::VCMPPSZ256rrik:
+  case X86::VCMPPSZrmik:     case X86::VCMPPSZrrik:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPPDZ128rmbi:  case X86::VCMPPDZ128rmbik:
+  case X86::VCMPPDZ256rmbi:  case X86::VCMPPDZ256rmbik:
+  case X86::VCMPPDZrmbi:     case X86::VCMPPDZrmbik:
+  case X86::VCMPPSZ128rmbi:  case X86::VCMPPSZ128rmbik:
+  case X86::VCMPPSZ256rmbi:  case X86::VCMPPSZ256rmbik:
+  case X86::VCMPPSZrmbi:     case X86::VCMPPSZrmbik:
+  case X86::VCMPPDZrrib:     case X86::VCMPPDZrribk:
+  case X86::VCMPPSZrrib:     case X86::VCMPPSZrribk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
+  case X86::VCMPPHZ128rmi:   case X86::VCMPPHZ128rri:
+  case X86::VCMPPHZ256rmi:   case X86::VCMPPHZ256rri:
+  case X86::VCMPPHZrmi:      case X86::VCMPPHZrri:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPPHZ128rmik:  case X86::VCMPPHZ128rrik:
+  case X86::VCMPPHZ256rmik:  case X86::VCMPPHZ256rrik:
+  case X86::VCMPPHZrmik:     case X86::VCMPPHZrrik:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
+  case X86::VCMPPHZ128rmbi:  case X86::VCMPPHZ128rmbik:
+  case X86::VCMPPHZ256rmbi:  case X86::VCMPPHZ256rmbik:
+  case X86::VCMPPHZrmbi:     case X86::VCMPPHZrmbik:
+  case X86::VCMPPHZrrib:     case X86::VCMPPHZrribk:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
     if (Imm >= 0 && Imm <= 31) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/true, OS);

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index 134206466c542f..001a9d4d4d3c1e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -52,8 +52,8 @@ bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
   case X86::VCMPPDYrri:
   case X86::VCMPPSrri:
   case X86::VCMPPSYrri:
-  case X86::VCMPSDrr:
-  case X86::VCMPSSrr: {
+  case X86::VCMPSDrri:
+  case X86::VCMPSSrri: {
     switch (MI.getOperand(3).getImm() & 0x7) {
     default:
       return false;

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
index fd46e4e1df821a..29a1866bf01ab0 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
@@ -272,24 +272,24 @@ void X86InstPrinterCommon::printCMPMnemonic(const MCInst *MI, bool IsVCmp,
   case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
     OS << "ps\t";
     break;
-  case X86::CMPSDrm:        case X86::CMPSDrr:
-  case X86::CMPSDrm_Int:    case X86::CMPSDrr_Int:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
+  case X86::CMPSDrmi:        case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int:    case X86::CMPSDrri_Int:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
     OS << "sd\t";
     break;
-  case X86::CMPSSrm:        case X86::CMPSSrr:
-  case X86::CMPSSrm_Int:    case X86::CMPSSrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
+  case X86::CMPSSrmi:        case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int:    case X86::CMPSSrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
     OS << "ss\t";
     break;
   case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
@@ -304,10 +304,10 @@ void X86InstPrinterCommon::printCMPMnemonic(const MCInst *MI, bool IsVCmp,
   case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
     OS << "ph\t";
     break;
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
     OS << "sh\t";
     break;
   }

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
index 0705700c78173a..7c8459a546516e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
@@ -69,12 +69,12 @@ bool X86IntelInstPrinter::printVecCompareInstr(const MCInst *MI, raw_ostream &OS
   // Custom print the vector compare instructions to get the immediate
   // translated into the mnemonic.
   switch (MI->getOpcode()) {
-  case X86::CMPPDrmi:    case X86::CMPPDrri:
-  case X86::CMPPSrmi:    case X86::CMPPSrri:
-  case X86::CMPSDrm:     case X86::CMPSDrr:
-  case X86::CMPSDrm_Int: case X86::CMPSDrr_Int:
-  case X86::CMPSSrm:     case X86::CMPSSrr:
-  case X86::CMPSSrm_Int: case X86::CMPSSrr_Int:
+  case X86::CMPPDrmi:     case X86::CMPPDrri:
+  case X86::CMPPSrmi:     case X86::CMPPSrri:
+  case X86::CMPSDrmi:     case X86::CMPSDrri:
+  case X86::CMPSDrmi_Int: case X86::CMPSDrri_Int:
+  case X86::CMPSSrmi:     case X86::CMPSSrri:
+  case X86::CMPSSrmi_Int: case X86::CMPSSrri_Int:
     if (Imm >= 0 && Imm <= 7) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/false, OS);
@@ -96,56 +96,56 @@ bool X86IntelInstPrinter::printVecCompareInstr(const MCInst *MI, raw_ostream &OS
     }
     break;
 
-  case X86::VCMPPDrmi:      case X86::VCMPPDrri:
-  case X86::VCMPPDYrmi:     case X86::VCMPPDYrri:
-  case X86::VCMPPDZ128rmi:  case X86::VCMPPDZ128rri:
-  case X86::VCMPPDZ256rmi:  case X86::VCMPPDZ256rri:
-  case X86::VCMPPDZrmi:     case X86::VCMPPDZrri:
-  case X86::VCMPPSrmi:      case X86::VCMPPSrri:
-  case X86::VCMPPSYrmi:     case X86::VCMPPSYrri:
-  case X86::VCMPPSZ128rmi:  case X86::VCMPPSZ128rri:
-  case X86::VCMPPSZ256rmi:  case X86::VCMPPSZ256rri:
-  case X86::VCMPPSZrmi:     case X86::VCMPPSZrri:
-  case X86::VCMPSDrm:       case X86::VCMPSDrr:
-  case X86::VCMPSDZrm:      case X86::VCMPSDZrr:
-  case X86::VCMPSDrm_Int:   case X86::VCMPSDrr_Int:
-  case X86::VCMPSDZrm_Int:  case X86::VCMPSDZrr_Int:
-  case X86::VCMPSSrm:       case X86::VCMPSSrr:
-  case X86::VCMPSSZrm:      case X86::VCMPSSZrr:
-  case X86::VCMPSSrm_Int:   case X86::VCMPSSrr_Int:
-  case X86::VCMPSSZrm_Int:  case X86::VCMPSSZrr_Int:
-  case X86::VCMPPDZ128rmik: case X86::VCMPPDZ128rrik:
-  case X86::VCMPPDZ256rmik: case X86::VCMPPDZ256rrik:
-  case X86::VCMPPDZrmik:    case X86::VCMPPDZrrik:
-  case X86::VCMPPSZ128rmik: case X86::VCMPPSZ128rrik:
-  case X86::VCMPPSZ256rmik: case X86::VCMPPSZ256rrik:
-  case X86::VCMPPSZrmik:    case X86::VCMPPSZrrik:
-  case X86::VCMPSDZrm_Intk: case X86::VCMPSDZrr_Intk:
-  case X86::VCMPSSZrm_Intk: case X86::VCMPSSZrr_Intk:
-  case X86::VCMPPDZ128rmbi: case X86::VCMPPDZ128rmbik:
-  case X86::VCMPPDZ256rmbi: case X86::VCMPPDZ256rmbik:
-  case X86::VCMPPDZrmbi:    case X86::VCMPPDZrmbik:
-  case X86::VCMPPSZ128rmbi: case X86::VCMPPSZ128rmbik:
-  case X86::VCMPPSZ256rmbi: case X86::VCMPPSZ256rmbik:
-  case X86::VCMPPSZrmbi:    case X86::VCMPPSZrmbik:
-  case X86::VCMPPDZrrib:    case X86::VCMPPDZrribk:
-  case X86::VCMPPSZrrib:    case X86::VCMPPSZrribk:
-  case X86::VCMPSDZrrb_Int: case X86::VCMPSDZrrb_Intk:
-  case X86::VCMPSSZrrb_Int: case X86::VCMPSSZrrb_Intk:
-  case X86::VCMPPHZ128rmi:  case X86::VCMPPHZ128rri:
-  case X86::VCMPPHZ256rmi:  case X86::VCMPPHZ256rri:
-  case X86::VCMPPHZrmi:     case X86::VCMPPHZrri:
-  case X86::VCMPSHZrm:      case X86::VCMPSHZrr:
-  case X86::VCMPSHZrm_Int:  case X86::VCMPSHZrr_Int:
-  case X86::VCMPPHZ128rmik: case X86::VCMPPHZ128rrik:
-  case X86::VCMPPHZ256rmik: case X86::VCMPPHZ256rrik:
-  case X86::VCMPPHZrmik:    case X86::VCMPPHZrrik:
-  case X86::VCMPSHZrm_Intk: case X86::VCMPSHZrr_Intk:
-  case X86::VCMPPHZ128rmbi: case X86::VCMPPHZ128rmbik:
-  case X86::VCMPPHZ256rmbi: case X86::VCMPPHZ256rmbik:
-  case X86::VCMPPHZrmbi:    case X86::VCMPPHZrmbik:
-  case X86::VCMPPHZrrib:    case X86::VCMPPHZrribk:
-  case X86::VCMPSHZrrb_Int: case X86::VCMPSHZrrb_Intk:
+  case X86::VCMPPDrmi:       case X86::VCMPPDrri:
+  case X86::VCMPPDYrmi:      case X86::VCMPPDYrri:
+  case X86::VCMPPDZ128rmi:   case X86::VCMPPDZ128rri:
+  case X86::VCMPPDZ256rmi:   case X86::VCMPPDZ256rri:
+  case X86::VCMPPDZrmi:      case X86::VCMPPDZrri:
+  case X86::VCMPPSrmi:       case X86::VCMPPSrri:
+  case X86::VCMPPSYrmi:      case X86::VCMPPSYrri:
+  case X86::VCMPPSZ128rmi:   case X86::VCMPPSZ128rri:
+  case X86::VCMPPSZ256rmi:   case X86::VCMPPSZ256rri:
+  case X86::VCMPPSZrmi:      case X86::VCMPPSZrri:
+  case X86::VCMPSDrmi:       case X86::VCMPSDrri:
+  case X86::VCMPSDZrmi:      case X86::VCMPSDZrri:
+  case X86::VCMPSDrmi_Int:   case X86::VCMPSDrri_Int:
+  case X86::VCMPSDZrmi_Int:  case X86::VCMPSDZrri_Int:
+  case X86::VCMPSSrmi:       case X86::VCMPSSrri:
+  case X86::VCMPSSZrmi:      case X86::VCMPSSZrri:
+  case X86::VCMPSSrmi_Int:   case X86::VCMPSSrri_Int:
+  case X86::VCMPSSZrmi_Int:  case X86::VCMPSSZrri_Int:
+  case X86::VCMPPDZ128rmik:  case X86::VCMPPDZ128rrik:
+  case X86::VCMPPDZ256rmik:  case X86::VCMPPDZ256rrik:
+  case X86::VCMPPDZrmik:     case X86::VCMPPDZrrik:
+  case X86::VCMPPSZ128rmik:  case X86::VCMPPSZ128rrik:
+  case X86::VCMPPSZ256rmik:  case X86::VCMPPSZ256rrik:
+  case X86::VCMPPSZrmik:     case X86::VCMPPSZrrik:
+  case X86::VCMPSDZrmi_Intk: case X86::VCMPSDZrri_Intk:
+  case X86::VCMPSSZrmi_Intk: case X86::VCMPSSZrri_Intk:
+  case X86::VCMPPDZ128rmbi:  case X86::VCMPPDZ128rmbik:
+  case X86::VCMPPDZ256rmbi:  case X86::VCMPPDZ256rmbik:
+  case X86::VCMPPDZrmbi:     case X86::VCMPPDZrmbik:
+  case X86::VCMPPSZ128rmbi:  case X86::VCMPPSZ128rmbik:
+  case X86::VCMPPSZ256rmbi:  case X86::VCMPPSZ256rmbik:
+  case X86::VCMPPSZrmbi:     case X86::VCMPPSZrmbik:
+  case X86::VCMPPDZrrib:     case X86::VCMPPDZrribk:
+  case X86::VCMPPSZrrib:     case X86::VCMPPSZrribk:
+  case X86::VCMPSDZrrib_Int: case X86::VCMPSDZrrib_Intk:
+  case X86::VCMPSSZrrib_Int: case X86::VCMPSSZrrib_Intk:
+  case X86::VCMPPHZ128rmi:   case X86::VCMPPHZ128rri:
+  case X86::VCMPPHZ256rmi:   case X86::VCMPPHZ256rri:
+  case X86::VCMPPHZrmi:      case X86::VCMPPHZrri:
+  case X86::VCMPSHZrmi:      case X86::VCMPSHZrri:
+  case X86::VCMPSHZrmi_Int:  case X86::VCMPSHZrri_Int:
+  case X86::VCMPPHZ128rmik:  case X86::VCMPPHZ128rrik:
+  case X86::VCMPPHZ256rmik:  case X86::VCMPPHZ256rrik:
+  case X86::VCMPPHZrmik:     case X86::VCMPPHZrrik:
+  case X86::VCMPSHZrmi_Intk: case X86::VCMPSHZrri_Intk:
+  case X86::VCMPPHZ128rmbi:  case X86::VCMPPHZ128rmbik:
+  case X86::VCMPPHZ256rmbi:  case X86::VCMPPHZ256rmbik:
+  case X86::VCMPPHZrmbi:     case X86::VCMPPHZrmbik:
+  case X86::VCMPPHZrrib:     case X86::VCMPPHZrribk:
+  case X86::VCMPSHZrrib_Int: case X86::VCMPSHZrrib_Intk:
     if (Imm >= 0 && Imm <= 31) {
       OS << '\t';
       printCMPMnemonic(MI, /*IsVCMP*/true, OS);

diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 9368de62817b3d..9f0b5f32df20a0 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2198,7 +2198,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     const TargetRegisterClass *VK1 = &X86::VK1RegClass;
 
     unsigned CmpOpcode =
-      (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
+      (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri;
     Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
                                        CC);
 
@@ -2228,7 +2228,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     // instructions as the AND/ANDN/OR sequence due to register moves, so
     // don't bother.
     unsigned CmpOpcode =
-      (RetVT == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
+      (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri;
     unsigned BlendOpcode =
       (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
 
@@ -2242,8 +2242,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
   } else {
     // Choose the SSE instruction sequence based on data type (float or double).
     static const uint16_t OpcTable[2][4] = {
-      { X86::CMPSSrr,  X86::ANDPSrr,  X86::ANDNPSrr,  X86::ORPSrr  },
-      { X86::CMPSDrr,  X86::ANDPDrr,  X86::ANDNPDrr,  X86::ORPDrr  }
+      { X86::CMPSSrri,  X86::ANDPSrr,  X86::ANDNPSrr,  X86::ORPSrr  },
+      { X86::CMPSDrri,  X86::ANDPDrr,  X86::ANDNPDrr,  X86::ORPDrr  }
     };
 
     const uint16_t *Opc = nullptr;

diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index a76561f092c349..43a40f5e691ea3 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1937,58 +1937,58 @@ defm VPBLENDMW : blendmask_bw<0x66, "vpblendmw", SchedWriteVarBlend,
 multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE,
                              PatFrag OpNode_su, PatFrag OpNodeSAE_su,
                              X86FoldableSchedWrite sched> {
-  defm  rr_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
-                      (outs _.KRC:$dst),
-                      (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
-                      "vcmp"#_.Suffix,
-                      "$cc, $src2, $src1", "$src1, $src2, $cc",
-                      (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc),
-                      (OpNode_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                 timm:$cc)>, EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+  defm  rri_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
+                                       (outs _.KRC:$dst),
+                                       (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
+                                       "vcmp"#_.Suffix,
+                                       "$cc, $src2, $src1", "$src1, $src2, $cc",
+                                       (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc),
+                                       (OpNode_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc)>,
+                                       EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
   let mayLoad = 1 in
-  defm  rm_Int  : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
-                    (outs _.KRC:$dst),
-                    (ins _.RC:$src1, _.IntScalarMemOp:$src2, u8imm:$cc),
-                    "vcmp"#_.Suffix,
-                    "$cc, $src2, $src1", "$src1, $src2, $cc",
-                    (OpNode (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
-                        timm:$cc),
-                    (OpNode_su (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
-                        timm:$cc)>, EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
-                    Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+  defm  rmi_Int  : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+                                       (outs _.KRC:$dst),
+                                       (ins _.RC:$src1, _.IntScalarMemOp:$src2, u8imm:$cc),
+                                       "vcmp"#_.Suffix,
+                                       "$cc, $src2, $src1", "$src1, $src2, $cc",
+                                       (OpNode (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
+                                           timm:$cc),
+                                       (OpNode_su (_.VT _.RC:$src1), (_.ScalarIntMemFrags addr:$src2),
+                                           timm:$cc)>, EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
+                                       Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
 
   let Uses = [MXCSR] in
-  defm  rrb_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
-                     (outs _.KRC:$dst),
-                     (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
-                     "vcmp"#_.Suffix,
-                     "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc",
-                     (OpNodeSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                timm:$cc),
-                     (OpNodeSAE_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
-                                   timm:$cc)>,
-                     EVEX, VVVV, VEX_LIG, EVEX_B, Sched<[sched]>;
+  defm  rrib_Int  : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
+                                        (outs _.KRC:$dst),
+                                        (ins _.RC:$src1, _.RC:$src2, u8imm:$cc),
+                                        "vcmp"#_.Suffix,
+                                        "$cc, {sae}, $src2, $src1","$src1, $src2, {sae}, $cc",
+                                        (OpNodeSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+                                                   timm:$cc),
+                                        (OpNodeSAE_su (_.VT _.RC:$src1), (_.VT _.RC:$src2),
+                                                      timm:$cc)>,
+                                        EVEX, VVVV, VEX_LIG, EVEX_B, Sched<[sched]>;
 
   let isCodeGenOnly = 1 in {
     let isCommutable = 1 in
-    def rr : AVX512Ii8<0xC2, MRMSrcReg,
-                (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, u8imm:$cc),
-                !strconcat("vcmp", _.Suffix,
-                           "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
-                [(set _.KRC:$dst, (OpNode _.FRC:$src1,
-                                          _.FRC:$src2,
-                                          timm:$cc))]>,
-                EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
-    def rm : AVX512Ii8<0xC2, MRMSrcMem,
-              (outs _.KRC:$dst),
-              (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
-              !strconcat("vcmp", _.Suffix,
-                         "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
-              [(set _.KRC:$dst, (OpNode _.FRC:$src1,
-                                        (_.ScalarLdFrag addr:$src2),
-                                        timm:$cc))]>,
-              EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
-              Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+    def rri : AVX512Ii8<0xC2, MRMSrcReg,
+                        (outs _.KRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, u8imm:$cc),
+                        !strconcat("vcmp", _.Suffix,
+                                   "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+                        [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+                                                  _.FRC:$src2,
+                                                  timm:$cc))]>,
+                        EVEX, VVVV, VEX_LIG, Sched<[sched]>, SIMD_EXC;
+    def rmi : AVX512Ii8<0xC2, MRMSrcMem,
+                        (outs _.KRC:$dst),
+                        (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
+                        !strconcat("vcmp", _.Suffix,
+                                   "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
+                        [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+                                                  (_.ScalarLdFrag addr:$src2),
+                                                  timm:$cc))]>,
+                        EVEX, VVVV, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>,
+                        Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
   }
 }
 
@@ -2437,15 +2437,15 @@ defm VCMPPH : avx512_vcmp<SchedWriteFCmp, avx512vl_f16_info, HasFP16>,
 // Patterns to select fp compares with load as first operand.
 let Predicates = [HasAVX512] in {
   def : Pat<(v1i1 (X86cmpms (loadf64 addr:$src2), FR64X:$src1, timm:$cc)),
-            (VCMPSDZrm FR64X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSDZrmi FR64X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 
   def : Pat<(v1i1 (X86cmpms (loadf32 addr:$src2), FR32X:$src1, timm:$cc)),
-            (VCMPSSZrm FR32X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSSZrmi FR32X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 }
 
 let Predicates = [HasFP16] in {
   def : Pat<(v1i1 (X86cmpms (loadf16 addr:$src2), FR16X:$src1, timm:$cc)),
-            (VCMPSHZrm FR16X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
+            (VCMPSHZrmi FR16X:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>;
 }
 
 // ----------------------------------------------------------------

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 3f0557e651f89b..af0ed071c29aba 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -2573,11 +2573,11 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     WorkingMI->getOperand(3).setImm(
         X86::getSwappedVPCOMImm(MI.getOperand(3).getImm() & 0x7));
     break;
-  case X86::VCMPSDZrr:
-  case X86::VCMPSSZrr:
+  case X86::VCMPSDZrri:
+  case X86::VCMPSSZrri:
   case X86::VCMPPDZrri:
   case X86::VCMPPSZrri:
-  case X86::VCMPSHZrr:
+  case X86::VCMPSHZrri:
   case X86::VCMPPHZrri:
   case X86::VCMPPHZ128rri:
   case X86::VCMPPHZ256rri:
@@ -2820,21 +2820,21 @@ bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
     return false;
 
   switch (MI.getOpcode()) {
-  case X86::CMPSDrr:
-  case X86::CMPSSrr:
+  case X86::CMPSDrri:
+  case X86::CMPSSrri:
   case X86::CMPPDrri:
   case X86::CMPPSrri:
-  case X86::VCMPSDrr:
-  case X86::VCMPSSrr:
+  case X86::VCMPSDrri:
+  case X86::VCMPSSrri:
   case X86::VCMPPDrri:
   case X86::VCMPPSrri:
   case X86::VCMPPDYrri:
   case X86::VCMPPSYrri:
-  case X86::VCMPSDZrr:
-  case X86::VCMPSSZrr:
+  case X86::VCMPSDZrri:
+  case X86::VCMPSSZrri:
   case X86::VCMPPDZrri:
   case X86::VCMPPSZrri:
-  case X86::VCMPSHZrr:
+  case X86::VCMPSHZrri:
   case X86::VCMPPHZrri:
   case X86::VCMPPHZ128rri:
   case X86::VCMPPHZ256rri:
@@ -7510,9 +7510,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::ADDSSrr_Int:
     case X86::VADDSSrr_Int:
     case X86::VADDSSZrr_Int:
-    case X86::CMPSSrr_Int:
-    case X86::VCMPSSrr_Int:
-    case X86::VCMPSSZrr_Int:
+    case X86::CMPSSrri_Int:
+    case X86::VCMPSSrri_Int:
+    case X86::VCMPSSZrri_Int:
     case X86::DIVSSrr_Int:
     case X86::VDIVSSrr_Int:
     case X86::VDIVSSZrr_Int:
@@ -7533,7 +7533,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSSZrr_Int:
     case X86::VADDSSZrr_Intk:
     case X86::VADDSSZrr_Intkz:
-    case X86::VCMPSSZrr_Intk:
+    case X86::VCMPSSZrri_Intk:
     case X86::VDIVSSZrr_Intk:
     case X86::VDIVSSZrr_Intkz:
     case X86::VMAXSSZrr_Intk:
@@ -7679,9 +7679,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::ADDSDrr_Int:
     case X86::VADDSDrr_Int:
     case X86::VADDSDZrr_Int:
-    case X86::CMPSDrr_Int:
-    case X86::VCMPSDrr_Int:
-    case X86::VCMPSDZrr_Int:
+    case X86::CMPSDrri_Int:
+    case X86::VCMPSDrri_Int:
+    case X86::VCMPSDZrri_Int:
     case X86::DIVSDrr_Int:
     case X86::VDIVSDrr_Int:
     case X86::VDIVSDZrr_Int:
@@ -7702,7 +7702,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSDZrr_Int:
     case X86::VADDSDZrr_Intk:
     case X86::VADDSDZrr_Intkz:
-    case X86::VCMPSDZrr_Intk:
+    case X86::VCMPSDZrri_Intk:
     case X86::VDIVSDZrr_Intk:
     case X86::VDIVSDZrr_Intkz:
     case X86::VMAXSDZrr_Intk:
@@ -7814,7 +7814,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     // instruction isn't scalar (SH).
     switch (UserOpc) {
     case X86::VADDSHZrr_Int:
-    case X86::VCMPSHZrr_Int:
+    case X86::VCMPSHZrri_Int:
     case X86::VDIVSHZrr_Int:
     case X86::VMAXSHZrr_Int:
     case X86::VMINSHZrr_Int:
@@ -7822,7 +7822,7 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VSUBSHZrr_Int:
     case X86::VADDSHZrr_Intk:
     case X86::VADDSHZrr_Intkz:
-    case X86::VCMPSHZrr_Intk:
+    case X86::VCMPSHZrri_Intk:
     case X86::VDIVSHZrr_Intk:
     case X86::VDIVSHZrr_Intkz:
     case X86::VMAXSHZrr_Intk:

diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 459b5b03507c70..fd20090fe0973b 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1830,29 +1830,29 @@ multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                             PatFrag ld_frag, string asm,
                             X86FoldableSchedWrite sched,
                             PatFrags mem_frags> {
-  def rr_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
-                    (ins VR128:$src1, VR128:$src2, u8imm:$cc), asm,
-                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
-                                              VR128:$src2, timm:$cc))]>,
-           Sched<[sched]>, SIMD_EXC;
+  def rri_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
+                     (ins VR128:$src1, VR128:$src2, u8imm:$cc), asm,
+                     [(set VR128:$dst, (OpNode (VT VR128:$src1),
+                                               VR128:$src2, timm:$cc))]>,
+                     Sched<[sched]>, SIMD_EXC;
   let mayLoad = 1 in
-  def rm_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
-                    (ins VR128:$src1, memop:$src2, u8imm:$cc), asm,
-                    [(set VR128:$dst, (OpNode (VT VR128:$src1),
-                                              (mem_frags addr:$src2), timm:$cc))]>,
-           Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+  def rmi_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
+                     (ins VR128:$src1, memop:$src2, u8imm:$cc), asm,
+                     [(set VR128:$dst, (OpNode (VT VR128:$src1),
+                                               (mem_frags addr:$src2), timm:$cc))]>,
+                     Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
 
   let isCodeGenOnly = 1 in {
     let isCommutable = 1 in
-    def rr : SIi8<0xC2, MRMSrcReg,
-                  (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
-                  [(set RC:$dst, (OpNode RC:$src1, RC:$src2, timm:$cc))]>,
-                  Sched<[sched]>, SIMD_EXC;
-    def rm : SIi8<0xC2, MRMSrcMem,
-                  (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
-                  [(set RC:$dst, (OpNode RC:$src1,
-                                         (ld_frag addr:$src2), timm:$cc))]>,
-                  Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
+    def rri : SIi8<0xC2, MRMSrcReg,
+                   (outs RC:$dst), (ins RC:$src1, RC:$src2, u8imm:$cc), asm,
+                   [(set RC:$dst, (OpNode RC:$src1, RC:$src2, timm:$cc))]>,
+                   Sched<[sched]>, SIMD_EXC;
+    def rmi : SIi8<0xC2, MRMSrcMem,
+                   (outs RC:$dst), (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm,
+                   [(set RC:$dst, (OpNode RC:$src1,
+                                          (ld_frag addr:$src2), timm:$cc))]>,
+                   Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
   }
 }
 
@@ -2023,11 +2023,11 @@ let Predicates = [HasAVX] in {
 
   def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                           CommutableCMPCC:$cc)),
-            (VCMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;
+            (VCMPSDrmi FR64:$src1, addr:$src2, timm:$cc)>;
 
   def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                           CommutableCMPCC:$cc)),
-            (VCMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
+            (VCMPSSrmi FR32:$src1, addr:$src2, timm:$cc)>;
 }
 
 let Predicates = [UseSSE2] in {
@@ -2037,7 +2037,7 @@ let Predicates = [UseSSE2] in {
 
   def : Pat<(f64 (X86cmps (loadf64 addr:$src2), FR64:$src1,
                           CommutableCMPCC:$cc)),
-            (CMPSDrm FR64:$src1, addr:$src2, timm:$cc)>;
+            (CMPSDrmi FR64:$src1, addr:$src2, timm:$cc)>;
 }
 
 let Predicates = [UseSSE1] in {
@@ -2047,7 +2047,7 @@ let Predicates = [UseSSE1] in {
 
   def : Pat<(f32 (X86cmps (loadf32 addr:$src2), FR32:$src1,
                           CommutableCMPCC:$cc)),
-            (CMPSSrm FR32:$src1, addr:$src2, timm:$cc)>;
+            (CMPSSrmi FR32:$src1, addr:$src2, timm:$cc)>;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index bf9e4b7dc6d9ae..78c5994ee96470 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -663,8 +663,8 @@ def : InstRW<[SPRWriteResGroup12], (instregex "^ADD_F(P?)rST0$",
                                               "^SUB(R?)_FST0r$",
                                               "^VALIGN(D|Q)Z256rri((k|kz)?)$",
                                               "^VCMPP(D|H|S)Z(128|256)rri(k?)$",
-                                              "^VCMPS(D|H|S)Zrr$",
-                                              "^VCMPS(D|H|S)Zrr(b?)_Int(k?)$",
+                                              "^VCMPS(D|H|S)Zrri$",
+                                              "^VCMPS(D|H|S)Zrr(b?)i_Int(k?)$",
                                               "^VFPCLASSP(D|H|S)Z(128|256)rr(k?)$",
                                               "^VFPCLASSS(D|H|S)Zrr(k?)$",
                                               "^VPACK(S|U)S(DW|WB)Yrr$",
@@ -2739,8 +2739,8 @@ def : InstRW<[SPRWriteResGroup263, ReadAfterVecYLd], (instregex "^VCMPP(D|H|S)Z(
                                                                 "^VPCMPUDZ((256)?)rmib(k?)$",
                                                                 "^VPTEST(N?)M(B|D|Q|W)Z((256)?)rm(k?)$",
                                                                 "^VPTEST(N?)M(D|Q)Z((256)?)rmb(k?)$")>;
-def : InstRW<[SPRWriteResGroup263, ReadAfterVecLd], (instregex "^VCMPS(D|H|S)Zrm$",
-                                                               "^VCMPS(D|H|S)Zrm_Int(k?)$",
+def : InstRW<[SPRWriteResGroup263, ReadAfterVecLd], (instregex "^VCMPS(D|H|S)Zrmi$",
+                                                               "^VCMPS(D|H|S)Zrmi_Int(k?)$",
                                                                "^VFPCLASSS(D|H|S)Zrmk$")>;
 
 def SPRWriteResGroup264 : SchedWriteRes<[SPRPort00, SPRPort02_03_11]> {

diff --git a/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll b/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
index b09a14cee95742..e70e5ff80d9592 100644
--- a/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
+++ b/llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll
@@ -21,8 +21,8 @@ define void @test_fcmp_storei1(i1 %cond, ptr %fptr, ptr %iptr, float %f1, float
   ; CHECK-NEXT: bb.1.if:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[VCMPSSZrr:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrr [[COPY3]], [[COPY2]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vk16 = COPY [[VCMPSSZrr]]
+  ; CHECK-NEXT:   [[VCMPSSZrri:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrri [[COPY3]], [[COPY2]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vk16 = COPY [[VCMPSSZrri]]
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:vk32 = COPY [[COPY7]]
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:vk8 = COPY [[COPY8]]
   ; CHECK-NEXT:   JMP_1 %bb.3
@@ -30,8 +30,8 @@ define void @test_fcmp_storei1(i1 %cond, ptr %fptr, ptr %iptr, float %f1, float
   ; CHECK-NEXT: bb.2.else:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[VCMPSSZrr1:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrr [[COPY1]], [[COPY]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:vk16 = COPY [[VCMPSSZrr1]]
+  ; CHECK-NEXT:   [[VCMPSSZrri1:%[0-9]+]]:vk1 = nofpexcept VCMPSSZrri [[COPY1]], [[COPY]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:vk16 = COPY [[VCMPSSZrri1]]
   ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:vk32 = COPY [[COPY10]]
   ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
   ; CHECK-NEXT: {{  $}}

diff --git a/llvm/test/CodeGen/X86/domain-reassignment.mir b/llvm/test/CodeGen/X86/domain-reassignment.mir
index 8b2fbe04d14afb..dcd435619990cf 100644
--- a/llvm/test/CodeGen/X86/domain-reassignment.mir
+++ b/llvm/test/CodeGen/X86/domain-reassignment.mir
@@ -133,14 +133,14 @@ body:             |
   ; CHECK:   JMP_1 %bb.1
   ; CHECK: bb.1.if:
   ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[VCMPSSZrr:%[0-9]+]]:vk1 = VCMPSSZrr [[COPY3]], [[COPY2]], 0
-  ; CHECK:   [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrr]]
+  ; CHECK:   [[VCMPSSZrri:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY3]], [[COPY2]], 0
+  ; CHECK:   [[COPY9:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri]]
   ; CHECK:   [[COPY10:%[0-9]+]]:vk8 = COPY [[COPY9]]
   ; CHECK:   JMP_1 %bb.3
   ; CHECK: bb.2.else:
   ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[VCMPSSZrr1:%[0-9]+]]:vk1 = VCMPSSZrr [[COPY1]], [[COPY]], 0
-  ; CHECK:   [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrr1]]
+  ; CHECK:   [[VCMPSSZrri1:%[0-9]+]]:vk1 = VCMPSSZrri [[COPY1]], [[COPY]], 0
+  ; CHECK:   [[COPY11:%[0-9]+]]:vk32 = COPY [[VCMPSSZrri1]]
   ; CHECK:   [[COPY12:%[0-9]+]]:vk8 = COPY [[COPY11]]
   ; CHECK: bb.3.exit:
   ; CHECK:   [[PHI:%[0-9]+]]:vk8 = PHI [[COPY12]], %bb.2, [[COPY10]], %bb.1
@@ -173,7 +173,7 @@ body:             |
   bb.1.if:
     successors: %bb.3(0x80000000)
 
-    %14 = VCMPSSZrr %7, %8, 0, implicit $mxcsr
+    %14 = VCMPSSZrri %7, %8, 0, implicit $mxcsr
 
     ; check that cross domain copies are replaced with same domain copies.
 
@@ -183,7 +183,7 @@ body:             |
 
   bb.2.else:
     successors: %bb.3(0x80000000)
-    %12 = VCMPSSZrr %9, %10, 0, implicit $mxcsr
+    %12 = VCMPSSZrri %9, %10, 0, implicit $mxcsr
 
     ; check that cross domain copies are replaced with same domain copies.
 

diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
index 8a7fea78702d89..2c7da100344b76 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll
@@ -40,8 +40,8 @@ define float @sqrt_ieee_ninf(float %f) #0 {
   ; CHECK-NEXT:   [[VPBROADCASTDrm:%[0-9]+]]:vr128 = VPBROADCASTDrm $rip, 1, $noreg, %const.2, $noreg :: (load (s32) from constant-pool)
   ; CHECK-NEXT:   [[VPANDrr:%[0-9]+]]:vr128 = VPANDrr killed [[COPY2]], killed [[VPBROADCASTDrm]]
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDrr]]
-  ; CHECK-NEXT:   [[VCMPSSrm:%[0-9]+]]:fr32 = nofpexcept VCMPSSrm killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr128 = COPY [[VCMPSSrm]]
+  ; CHECK-NEXT:   [[VCMPSSrmi:%[0-9]+]]:fr32 = nofpexcept VCMPSSrmi killed [[COPY3]], $rip, 1, $noreg, %const.3, $noreg, 1, implicit $mxcsr :: (load (s32) from constant-pool)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr128 = COPY [[VCMPSSrmi]]
   ; CHECK-NEXT:   [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY4]], killed [[COPY1]]
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
   ; CHECK-NEXT:   $xmm0 = COPY [[COPY5]]
@@ -84,8 +84,8 @@ define float @sqrt_daz_ninf(float %f) #1 {
   ; CHECK-NEXT:   [[VMULSSrr5:%[0-9]+]]:fr32 = ninf afn nofpexcept VMULSSrr killed [[VMULSSrr4]], killed [[VFMADD213SSr1]], implicit $mxcsr
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr128 = COPY [[VMULSSrr5]]
   ; CHECK-NEXT:   [[FsFLD0SS:%[0-9]+]]:fr32 = FsFLD0SS
-  ; CHECK-NEXT:   [[VCMPSSrr:%[0-9]+]]:fr32 = nofpexcept VCMPSSrr [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr128 = COPY [[VCMPSSrr]]
+  ; CHECK-NEXT:   [[VCMPSSrri:%[0-9]+]]:fr32 = nofpexcept VCMPSSrri [[COPY]], killed [[FsFLD0SS]], 0, implicit $mxcsr
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr128 = COPY [[VCMPSSrri]]
   ; CHECK-NEXT:   [[VPANDNrr:%[0-9]+]]:vr128 = VPANDNrr killed [[COPY2]], killed [[COPY1]]
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fr32 = COPY [[VPANDNrr]]
   ; CHECK-NEXT:   $xmm0 = COPY [[COPY3]]

diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index c35f22ff36de0b..d0ae2c474e85ac 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -1941,10 +1941,10 @@ static const X86FoldTableEntry Table2[] = {
   {X86::CMOV64rr, X86::CMOV64rm, 0},
   {X86::CMPPDrri, X86::CMPPDrmi, TB_ALIGN_16},
   {X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16},
-  {X86::CMPSDrr, X86::CMPSDrm, 0},
-  {X86::CMPSDrr_Int, X86::CMPSDrm_Int, TB_NO_REVERSE},
-  {X86::CMPSSrr, X86::CMPSSrm, 0},
-  {X86::CMPSSrr_Int, X86::CMPSSrm_Int, TB_NO_REVERSE},
+  {X86::CMPSDrri, X86::CMPSDrmi, 0},
+  {X86::CMPSDrri_Int, X86::CMPSDrmi_Int, TB_NO_REVERSE},
+  {X86::CMPSSrri, X86::CMPSSrmi, 0},
+  {X86::CMPSSrri_Int, X86::CMPSSrmi_Int, TB_NO_REVERSE},
   {X86::CRC32r32r16, X86::CRC32r32m16, 0},
   {X86::CRC32r32r16_EVEX, X86::CRC32r32m16_EVEX, 0},
   {X86::CRC32r32r32, X86::CRC32r32m32, 0},
@@ -2390,16 +2390,16 @@ static const X86FoldTableEntry Table2[] = {
   {X86::VCMPPSZ256rri, X86::VCMPPSZ256rmi, 0},
   {X86::VCMPPSZrri, X86::VCMPPSZrmi, 0},
   {X86::VCMPPSrri, X86::VCMPPSrmi, 0},
-  {X86::VCMPSDZrr, X86::VCMPSDZrm, 0},
-  {X86::VCMPSDZrr_Int, X86::VCMPSDZrm_Int, TB_NO_REVERSE},
-  {X86::VCMPSDrr, X86::VCMPSDrm, 0},
-  {X86::VCMPSDrr_Int, X86::VCMPSDrm_Int, TB_NO_REVERSE},
-  {X86::VCMPSHZrr, X86::VCMPSHZrm, 0},
-  {X86::VCMPSHZrr_Int, X86::VCMPSHZrm_Int, TB_NO_REVERSE},
-  {X86::VCMPSSZrr, X86::VCMPSSZrm, 0},
-  {X86::VCMPSSZrr_Int, X86::VCMPSSZrm_Int, TB_NO_REVERSE},
-  {X86::VCMPSSrr, X86::VCMPSSrm, 0},
-  {X86::VCMPSSrr_Int, X86::VCMPSSrm_Int, TB_NO_REVERSE},
+  {X86::VCMPSDZrri, X86::VCMPSDZrmi, 0},
+  {X86::VCMPSDZrri_Int, X86::VCMPSDZrmi_Int, TB_NO_REVERSE},
+  {X86::VCMPSDrri, X86::VCMPSDrmi, 0},
+  {X86::VCMPSDrri_Int, X86::VCMPSDrmi_Int, TB_NO_REVERSE},
+  {X86::VCMPSHZrri, X86::VCMPSHZrmi, 0},
+  {X86::VCMPSHZrri_Int, X86::VCMPSHZrmi_Int, TB_NO_REVERSE},
+  {X86::VCMPSSZrri, X86::VCMPSSZrmi, 0},
+  {X86::VCMPSSZrri_Int, X86::VCMPSSZrmi_Int, TB_NO_REVERSE},
+  {X86::VCMPSSrri, X86::VCMPSSrmi, 0},
+  {X86::VCMPSSrri_Int, X86::VCMPSSrmi_Int, TB_NO_REVERSE},
   {X86::VCVTDQ2PDZ128rrkz, X86::VCVTDQ2PDZ128rmkz, TB_NO_REVERSE},
   {X86::VCVTDQ2PDZ256rrkz, X86::VCVTDQ2PDZ256rmkz, 0},
   {X86::VCVTDQ2PDZrrkz, X86::VCVTDQ2PDZrmkz, 0},
@@ -3973,9 +3973,9 @@ static const X86FoldTableEntry Table3[] = {
   {X86::VCMPPSZ128rrik, X86::VCMPPSZ128rmik, 0},
   {X86::VCMPPSZ256rrik, X86::VCMPPSZ256rmik, 0},
   {X86::VCMPPSZrrik, X86::VCMPPSZrmik, 0},
-  {X86::VCMPSDZrr_Intk, X86::VCMPSDZrm_Intk, TB_NO_REVERSE},
-  {X86::VCMPSHZrr_Intk, X86::VCMPSHZrm_Intk, TB_NO_REVERSE},
-  {X86::VCMPSSZrr_Intk, X86::VCMPSSZrm_Intk, TB_NO_REVERSE},
+  {X86::VCMPSDZrri_Intk, X86::VCMPSDZrmi_Intk, TB_NO_REVERSE},
+  {X86::VCMPSHZrri_Intk, X86::VCMPSHZrmi_Intk, TB_NO_REVERSE},
+  {X86::VCMPSSZrri_Intk, X86::VCMPSSZrmi_Intk, TB_NO_REVERSE},
   {X86::VCVTDQ2PDZ128rrk, X86::VCVTDQ2PDZ128rmk, TB_NO_REVERSE},
   {X86::VCVTDQ2PDZ256rrk, X86::VCVTDQ2PDZ256rmk, 0},
   {X86::VCVTDQ2PDZrrk, X86::VCVTDQ2PDZrmk, 0},


        

