[llvm] 1695536 - [X86][NFC] Remove TB_FOLDED_BCAST and format code in X86InstrFoldTables.cpp

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 29 08:27:32 PST 2024


Author: Shengchen Kan
Date: 2024-01-30T00:27:16+08:00
New Revision: 169553688ca40d9a495f19e8ba2af1137e13cff8

URL: https://github.com/llvm/llvm-project/commit/169553688ca40d9a495f19e8ba2af1137e13cff8
DIFF: https://github.com/llvm/llvm-project/commit/169553688ca40d9a495f19e8ba2af1137e13cff8.diff

LOG: [X86][NFC] Remove TB_FOLDED_BCAST and format code in X86InstrFoldTables.cpp

Added: 
    

Modified: 
    llvm/include/llvm/Support/X86FoldTablesUtils.h
    llvm/lib/Target/X86/X86InstrFoldTables.cpp
    llvm/lib/Target/X86/X86InstrInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Support/X86FoldTablesUtils.h b/llvm/include/llvm/Support/X86FoldTablesUtils.h
index 77d32cc7fb37ed..790cdce9039f5b 100644
--- a/llvm/include/llvm/Support/X86FoldTablesUtils.h
+++ b/llvm/include/llvm/Support/X86FoldTablesUtils.h
@@ -31,12 +31,11 @@ enum {
 
   TB_FOLDED_LOAD = 1 << 5,
   TB_FOLDED_STORE = 1 << 6,
-  TB_FOLDED_BCAST = 1 << 7,
 
   // Minimum alignment required for load/store.
   // Used for RegOp->MemOp conversion. Encoded as Log2(Align)
-  // (stored in bits 9 - 11)
-  TB_ALIGN_SHIFT = 8,
+  // (stored in bits 8 - 10)
+  TB_ALIGN_SHIFT = 7,
   TB_ALIGN_1 = 0 << TB_ALIGN_SHIFT,
   TB_ALIGN_16 = 4 << TB_ALIGN_SHIFT,
   TB_ALIGN_32 = 5 << TB_ALIGN_SHIFT,
@@ -44,14 +43,14 @@ enum {
   TB_ALIGN_MASK = 0x7 << TB_ALIGN_SHIFT,
 
   // Broadcast type.
-  // (stored in bits 12 - 14)
+  // (stored in bits 11 - 13)
   TB_BCAST_TYPE_SHIFT = TB_ALIGN_SHIFT + 3,
-  TB_BCAST_W = 0 << TB_BCAST_TYPE_SHIFT,
-  TB_BCAST_D = 1 << TB_BCAST_TYPE_SHIFT,
-  TB_BCAST_Q = 2 << TB_BCAST_TYPE_SHIFT,
-  TB_BCAST_SS = 3 << TB_BCAST_TYPE_SHIFT,
-  TB_BCAST_SD = 4 << TB_BCAST_TYPE_SHIFT,
-  TB_BCAST_SH = 5 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_W = 1 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_D = 2 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_Q = 3 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_SS = 4 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_SD = 5 << TB_BCAST_TYPE_SHIFT,
+  TB_BCAST_SH = 6 << TB_BCAST_TYPE_SHIFT,
   TB_BCAST_MASK = 0x7 << TB_BCAST_TYPE_SHIFT,
 
   // Unused bits 15-16

diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index bb0a4d54996f03..1d6df0f6ad129f 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -121,13 +121,11 @@ lookupFoldTableImpl(ArrayRef<X86FoldTableEntry> Table, unsigned RegOp) {
   return nullptr;
 }
 
-const X86FoldTableEntry *
-llvm::lookupTwoAddrFoldTable(unsigned RegOp) {
+const X86FoldTableEntry *llvm::lookupTwoAddrFoldTable(unsigned RegOp) {
   return lookupFoldTableImpl(Table2Addr, RegOp);
 }
 
-const X86FoldTableEntry *
-llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
+const X86FoldTableEntry *llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
   ArrayRef<X86FoldTableEntry> FoldTable;
   if (OpNum == 0)
     FoldTable = ArrayRef(Table0);
@@ -181,19 +179,19 @@ struct X86MemUnfoldTable {
     // Broadcast tables.
     for (const X86FoldTableEntry &Entry : BroadcastTable1)
       // Index 1, folded broadcast
-      addTableEntry(Entry, TB_INDEX_1 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
+      addTableEntry(Entry, TB_INDEX_1 | TB_FOLDED_LOAD);
 
     for (const X86FoldTableEntry &Entry : BroadcastTable2)
       // Index 2, folded broadcast
-      addTableEntry(Entry, TB_INDEX_2 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
+      addTableEntry(Entry, TB_INDEX_2 | TB_FOLDED_LOAD);
 
     for (const X86FoldTableEntry &Entry : BroadcastTable3)
       // Index 3, folded broadcast
-      addTableEntry(Entry, TB_INDEX_3 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
+      addTableEntry(Entry, TB_INDEX_3 | TB_FOLDED_LOAD);
 
     for (const X86FoldTableEntry &Entry : BroadcastTable4)
       // Index 4, folded broadcast
-      addTableEntry(Entry, TB_INDEX_4 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
+      addTableEntry(Entry, TB_INDEX_4 | TB_FOLDED_LOAD);
 
     // Sort the memory->reg unfold table.
     array_pod_sort(Table.begin(), Table.end());
@@ -203,18 +201,16 @@ struct X86MemUnfoldTable {
            "Memory unfolding table is not unique!");
   }
 
-  void addTableEntry(const X86FoldTableEntry &Entry,
-                     uint16_t ExtraFlags) {
+  void addTableEntry(const X86FoldTableEntry &Entry, uint16_t ExtraFlags) {
     // NOTE: This swaps the KeyOp and DstOp in the table so we can sort it.
     if ((Entry.Flags & TB_NO_REVERSE) == 0)
       Table.push_back({Entry.DstOp, Entry.KeyOp,
-                      static_cast<uint16_t>(Entry.Flags | ExtraFlags) });
+                       static_cast<uint16_t>(Entry.Flags | ExtraFlags)});
   }
 };
-}
+} // namespace
 
-const X86FoldTableEntry *
-llvm::lookupUnfoldTable(unsigned MemOp) {
+const X86FoldTableEntry *llvm::lookupUnfoldTable(unsigned MemOp) {
   static X86MemUnfoldTable MemUnfoldTable;
   auto &Table = MemUnfoldTable.Table;
   auto I = llvm::lower_bound(Table, MemOp);
@@ -238,8 +234,8 @@ struct X86BroadcastFoldTable {
       unsigned BcstOp = Reg2Bcst.DstOp;
       if (const X86FoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 2)) {
         unsigned MemOp = Reg2Mem->DstOp;
-        uint16_t Flags = Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_2 |
-                         TB_FOLDED_LOAD | TB_FOLDED_BCAST;
+        uint16_t Flags =
+            Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_2 | TB_FOLDED_LOAD;
         Table.push_back({MemOp, BcstOp, Flags});
       }
     }
@@ -248,8 +244,8 @@ struct X86BroadcastFoldTable {
       unsigned BcstOp = Reg2Bcst.DstOp;
       if (const X86FoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 2)) {
         unsigned MemOp = Reg2Mem->DstOp;
-        uint16_t Flags = Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_2 |
-                         TB_FOLDED_LOAD | TB_FOLDED_BCAST;
+        uint16_t Flags =
+            Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_2 | TB_FOLDED_LOAD;
         Table.push_back({MemOp, BcstOp, Flags});
       }
     }
@@ -259,8 +255,8 @@ struct X86BroadcastFoldTable {
       unsigned BcstOp = Reg2Bcst.DstOp;
       if (const X86FoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 3)) {
         unsigned MemOp = Reg2Mem->DstOp;
-        uint16_t Flags = Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_3 |
-                         TB_FOLDED_LOAD | TB_FOLDED_BCAST;
+        uint16_t Flags =
+            Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_3 | TB_FOLDED_LOAD;
         Table.push_back({MemOp, BcstOp, Flags});
       }
     }
@@ -269,8 +265,8 @@ struct X86BroadcastFoldTable {
       unsigned BcstOp = Reg2Bcst.DstOp;
       if (const X86FoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 3)) {
         unsigned MemOp = Reg2Mem->DstOp;
-        uint16_t Flags = Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_3 |
-                         TB_FOLDED_LOAD | TB_FOLDED_BCAST;
+        uint16_t Flags =
+            Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_3 | TB_FOLDED_LOAD;
         Table.push_back({MemOp, BcstOp, Flags});
       }
     }
@@ -280,8 +276,8 @@ struct X86BroadcastFoldTable {
       unsigned BcstOp = Reg2Bcst.DstOp;
       if (const X86FoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 4)) {
         unsigned MemOp = Reg2Mem->DstOp;
-        uint16_t Flags = Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_4 |
-                         TB_FOLDED_LOAD | TB_FOLDED_BCAST;
+        uint16_t Flags =
+            Reg2Mem->Flags | Reg2Bcst.Flags | TB_INDEX_4 | TB_FOLDED_LOAD;
         Table.push_back({MemOp, BcstOp, Flags});
       }
     }

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 9a95464287c5dc..13fe75fc9edd1b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -8191,7 +8191,6 @@ bool X86InstrInfo::unfoldMemoryOperand(
   unsigned Index = I->Flags & TB_INDEX_MASK;
   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
-  bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
   if (UnfoldLoad && !FoldedLoad)
     return false;
   UnfoldLoad &= FoldedLoad;
@@ -8231,7 +8230,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
     auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
 
     unsigned Opc;
-    if (FoldedBCast) {
+    if (I->Flags & TB_BCAST_MASK) {
       Opc = getBroadcastOpcode(I, RC, Subtarget);
     } else {
       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
@@ -8341,7 +8340,6 @@ bool X86InstrInfo::unfoldMemoryOperand(
   unsigned Index = I->Flags & TB_INDEX_MASK;
   bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
   bool FoldedStore = I->Flags & TB_FOLDED_STORE;
-  bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
   const MCInstrDesc &MCID = get(Opc);
   MachineFunction &MF = DAG.getMachineFunction();
   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
@@ -8377,7 +8375,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
     // memory access is slow above.
 
     unsigned Opc;
-    if (FoldedBCast) {
+    if (I->Flags & TB_BCAST_MASK) {
       Opc = getBroadcastOpcode(I, RC, Subtarget);
     } else {
       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);


        


More information about the llvm-commits mailing list