[llvm] b87bb4e - [X86][MC] Move the code about fixed register encoding optimization to X86EncodingOptimization.cpp, NFCI

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Thu May 18 04:42:27 PDT 2023


Author: Shengchen Kan
Date: 2023-05-18T19:42:00+08:00
New Revision: b87bb4e0ff71a6248cfddea772aac6add590b615

URL: https://github.com/llvm/llvm-project/commit/b87bb4e0ff71a6248cfddea772aac6add590b615
DIFF: https://github.com/llvm/llvm-project/commit/b87bb4e0ff71a6248cfddea772aac6add590b615.diff

LOG: [X86][MC] Move the code about fixed register encoding optimization to X86EncodingOptimization.cpp, NFCI

Added: 
    

Modified: 
    llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
    llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
    llvm/lib/Target/X86/X86MCInstLower.cpp

Removed: 
    


################################################################################
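A note on what "fixed register encoding" refers to here: x86 provides shorter
encodings for ALU-with-immediate instructions whose destination is the
accumulator (AL/AX/EAX/RAX); for example, ADD32i32 is "05 id" (5 bytes) where
the generic ADD32ri is "81 /0 id" (6 bytes). Below is a minimal sketch of
driving the new helper directly; the MCInst construction and the example()
wrapper are illustrative only, not part of this commit:

    #include "MCTargetDesc/X86EncodingOptimization.h"
    #include "MCTargetDesc/X86MCTargetDesc.h" // X86::ADD32ri, X86::EAX, ...
    #include "llvm/MC/MCInst.h"
    using namespace llvm;

    static void example() {
      MCInst MI;
      MI.setOpcode(X86::ADD32ri);
      MI.addOperand(MCOperand::createReg(X86::EAX)); // destination
      MI.addOperand(MCOperand::createReg(X86::EAX)); // tied source
      MI.addOperand(MCOperand::createImm(1234));
      // Rewrites MI in place to ADD32i32 with the immediate as its only
      // operand and returns true; returns false (leaving MI untouched)
      // when the destination is not AL/AX/EAX/RAX.
      bool Shrunk = X86::optimizeToFixedRegisterForm(MI);
      (void)Shrunk;
    }
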
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index b398bf0b4ba5..69f65841d7b5 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -306,6 +306,10 @@ bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
   return true;
 }
 
+static bool isARegister(unsigned Reg) {
+  return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX;
+}
+
 /// Simplify things like MOV32rm to MOV32o32a.
 bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
   // Don't make these simplifications in 64-bit mode; other assemblers don't
@@ -338,7 +342,7 @@ bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
   unsigned AddrOp = AddrBase + 3;
   // Check whether the destination register can be fixed.
   unsigned Reg = MI.getOperand(RegOp).getReg();
-  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
+  if (!isARegister(Reg))
     return false;
   // Check whether this is an absolute address.
   // FIXME: We know TLVP symbol refs aren't, but there should be a better way
@@ -363,3 +367,60 @@ bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
   MI.addOperand(Seg);
   return true;
 }
+
+/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with
+/// a short fixed-register form.
+bool X86::optimizeToFixedRegisterForm(MCInst &MI) {
+  unsigned NewOpc;
+  switch (MI.getOpcode()) {
+  default:
+    return false;
+    FROM_TO(ADC8ri, ADC8i8)
+    FROM_TO(ADC16ri, ADC16i16)
+    FROM_TO(ADC32ri, ADC32i32)
+    FROM_TO(ADC64ri32, ADC64i32)
+    FROM_TO(ADD8ri, ADD8i8)
+    FROM_TO(ADD16ri, ADD16i16)
+    FROM_TO(ADD32ri, ADD32i32)
+    FROM_TO(ADD64ri32, ADD64i32)
+    FROM_TO(AND8ri, AND8i8)
+    FROM_TO(AND16ri, AND16i16)
+    FROM_TO(AND32ri, AND32i32)
+    FROM_TO(AND64ri32, AND64i32)
+    FROM_TO(CMP8ri, CMP8i8)
+    FROM_TO(CMP16ri, CMP16i16)
+    FROM_TO(CMP32ri, CMP32i32)
+    FROM_TO(CMP64ri32, CMP64i32)
+    FROM_TO(OR8ri, OR8i8)
+    FROM_TO(OR16ri, OR16i16)
+    FROM_TO(OR32ri, OR32i32)
+    FROM_TO(OR64ri32, OR64i32)
+    FROM_TO(SBB8ri, SBB8i8)
+    FROM_TO(SBB16ri, SBB16i16)
+    FROM_TO(SBB32ri, SBB32i32)
+    FROM_TO(SBB64ri32, SBB64i32)
+    FROM_TO(SUB8ri, SUB8i8)
+    FROM_TO(SUB16ri, SUB16i16)
+    FROM_TO(SUB32ri, SUB32i32)
+    FROM_TO(SUB64ri32, SUB64i32)
+    FROM_TO(TEST8ri, TEST8i8)
+    FROM_TO(TEST16ri, TEST16i16)
+    FROM_TO(TEST32ri, TEST32i32)
+    FROM_TO(TEST64ri32, TEST64i32)
+    FROM_TO(XOR8ri, XOR8i8)
+    FROM_TO(XOR16ri, XOR16i16)
+    FROM_TO(XOR32ri, XOR32i32)
+    FROM_TO(XOR64ri32, XOR64i32)
+  }
+  // Check whether the destination register can be fixed.
+  unsigned Reg = MI.getOperand(0).getReg();
+  if (!isARegister(Reg))
+    return false;
+
+  // If so, rewrite the instruction.
+  MCOperand Saved = MI.getOperand(MI.getNumOperands() - 1);
+  MI.clear();
+  MI.setOpcode(NewOpc);
+  MI.addOperand(Saved);
+  return true;
+}

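The FROM_TO macro used by optimizeToFixedRegisterForm above is defined earlier
in X86EncodingOptimization.cpp, outside this hunk's context. Judging from how
the switch consumes it, it presumably expands to a case label that selects the
short-form opcode, roughly:

    // Presumed expansion; see the macro's actual definition near the top of
    // X86EncodingOptimization.cpp.
    #define FROM_TO(FROM, TO)                                                  \
      case X86::FROM:                                                          \
        NewOpc = X86::TO;                                                      \
        break;
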
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
index b1908a6d8225..7d0c31751e84 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
@@ -22,6 +22,7 @@ bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI);
 bool optimizeMOVSX(MCInst &MI);
 bool optimizeINCDEC(MCInst &MI, bool In64BitMode);
 bool optimizeMOV(MCInst &MI, bool In64BitMode);
+bool optimizeToFixedRegisterForm(MCInst &MI);
 } // namespace X86
 } // namespace llvm
 #endif

diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 7bb05c42f760..defc199930cb 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -320,29 +320,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
   return MCOperand::createExpr(Expr);
 }
 
-/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
-/// a short fixed-register form.
-static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
-  unsigned ImmOp = Inst.getNumOperands() - 1;
-  assert(Inst.getOperand(0).isReg() &&
-         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
-         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
-           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
-          Inst.getNumOperands() == 2) &&
-         "Unexpected instruction!");
-
-  // Check whether the destination register can be fixed.
-  unsigned Reg = Inst.getOperand(0).getReg();
-  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
-    return;
-
-  // If so, rewrite the instruction.
-  MCOperand Saved = Inst.getOperand(ImmOp);
-  Inst = MCInst();
-  Inst.setOpcode(Opcode);
-  Inst.addOperand(Saved);
-}
-
 static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
   return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
 }
@@ -422,23 +399,13 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
     if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
       OutMI.addOperand(*MaybeMCOp);
 
-  if (X86::optimizeInstFromVEX3ToVEX2(OutMI, MI->getDesc()))
-    return;
-
-  if (X86::optimizeShiftRotateWithImmediateOne(OutMI))
-    return;
-
-  if (X86::optimizeVPCMPWithImmediateOneOrSix(OutMI))
-    return;
-
-  if (X86::optimizeMOVSX(OutMI))
-    return;
-
   bool In64BitMode = AsmPrinter.getSubtarget().is64Bit();
-  if (X86::optimizeINCDEC(OutMI, In64BitMode))
-    return;
-
-  if (X86::optimizeMOV(OutMI, In64BitMode))
+  if (X86::optimizeInstFromVEX3ToVEX2(OutMI, MI->getDesc()) ||
+      X86::optimizeShiftRotateWithImmediateOne(OutMI) ||
+      X86::optimizeVPCMPWithImmediateOneOrSix(OutMI) ||
+      X86::optimizeMOVSX(OutMI) || X86::optimizeINCDEC(OutMI, In64BitMode) ||
+      X86::optimizeMOV(OutMI, In64BitMode) ||
+      X86::optimizeToFixedRegisterForm(OutMI))
     return;
 
   // Handle a few special cases to eliminate operand modifiers.
@@ -532,58 +499,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
     OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
     break;
 
-  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
-  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
-  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
-  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
-  case X86::OR8ri:  case X86::OR16ri:  case X86::OR32ri:  case X86::OR64ri32:
-  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
-  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
-  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
-  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
-    unsigned NewOpc;
-    switch (OutMI.getOpcode()) {
-    default: llvm_unreachable("Invalid opcode");
-    case X86::ADC8ri:     NewOpc = X86::ADC8i8;    break;
-    case X86::ADC16ri:    NewOpc = X86::ADC16i16;  break;
-    case X86::ADC32ri:    NewOpc = X86::ADC32i32;  break;
-    case X86::ADC64ri32:  NewOpc = X86::ADC64i32;  break;
-    case X86::ADD8ri:     NewOpc = X86::ADD8i8;    break;
-    case X86::ADD16ri:    NewOpc = X86::ADD16i16;  break;
-    case X86::ADD32ri:    NewOpc = X86::ADD32i32;  break;
-    case X86::ADD64ri32:  NewOpc = X86::ADD64i32;  break;
-    case X86::AND8ri:     NewOpc = X86::AND8i8;    break;
-    case X86::AND16ri:    NewOpc = X86::AND16i16;  break;
-    case X86::AND32ri:    NewOpc = X86::AND32i32;  break;
-    case X86::AND64ri32:  NewOpc = X86::AND64i32;  break;
-    case X86::CMP8ri:     NewOpc = X86::CMP8i8;    break;
-    case X86::CMP16ri:    NewOpc = X86::CMP16i16;  break;
-    case X86::CMP32ri:    NewOpc = X86::CMP32i32;  break;
-    case X86::CMP64ri32:  NewOpc = X86::CMP64i32;  break;
-    case X86::OR8ri:      NewOpc = X86::OR8i8;     break;
-    case X86::OR16ri:     NewOpc = X86::OR16i16;   break;
-    case X86::OR32ri:     NewOpc = X86::OR32i32;   break;
-    case X86::OR64ri32:   NewOpc = X86::OR64i32;   break;
-    case X86::SBB8ri:     NewOpc = X86::SBB8i8;    break;
-    case X86::SBB16ri:    NewOpc = X86::SBB16i16;  break;
-    case X86::SBB32ri:    NewOpc = X86::SBB32i32;  break;
-    case X86::SBB64ri32:  NewOpc = X86::SBB64i32;  break;
-    case X86::SUB8ri:     NewOpc = X86::SUB8i8;    break;
-    case X86::SUB16ri:    NewOpc = X86::SUB16i16;  break;
-    case X86::SUB32ri:    NewOpc = X86::SUB32i32;  break;
-    case X86::SUB64ri32:  NewOpc = X86::SUB64i32;  break;
-    case X86::TEST8ri:    NewOpc = X86::TEST8i8;   break;
-    case X86::TEST16ri:   NewOpc = X86::TEST16i16; break;
-    case X86::TEST32ri:   NewOpc = X86::TEST32i32; break;
-    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
-    case X86::XOR8ri:     NewOpc = X86::XOR8i8;    break;
-    case X86::XOR16ri:    NewOpc = X86::XOR16i16;  break;
-    case X86::XOR32ri:    NewOpc = X86::XOR32i32;  break;
-    case X86::XOR64ri32:  NewOpc = X86::XOR64i32;  break;
-    }
-    SimplifyShortImmForm(OutMI, NewOpc);
-    break;
-  }
   case X86::MASKMOVDQU:
   case X86::VMASKMOVDQU:
     if (In64BitMode)


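For the size win at stake, compare the standard x86 encodings of the two forms
(immediate 1234 = 0x4D2, shown little-endian; byte values per the Intel SDM):

    81 c0 d2 04 00 00    addl $1234, %eax    # generic ModRM form (ADD32ri)
    05 d2 04 00 00       addl $1234, %eax    # fixed-register form (ADD32i32)

Because the change is NFCI, the emitted bytes are the same before and after
this patch; only where the rewrite lives (now the shared MC layer) changes.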