[llvm] [X86][MC] Support encoding/decoding for JMPABS (PR #72835)

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Nov 20 00:36:31 PST 2023


github-actions[bot] wrote:

:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
git-clang-format --diff a2e1de193477e7d92ec5c0a2ecd17a622cbf7aed 8154951e54cf4752a7791bfae09c0946399b32db -- llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp llvm/utils/TableGen/X86DisassemblerTables.cpp llvm/utils/TableGen/X86RecognizableInstr.cpp llvm/utils/TableGen/X86RecognizableInstr.h
``````````

</details>
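
If you would rather apply the suggested formatting than only view it, one option is to save the same command's output as a patch and apply it with `git apply`. This is a minimal sketch, not part of the bot's message: it assumes your working tree is checked out at the PR head commit listed above, and the patch file name `clang-format.patch` is arbitrary.

``````````bash
# Sketch: capture clang-format's suggested edits for the changed files as a
# patch, then apply it to the working tree (run from the repository root).
git-clang-format --diff a2e1de193477e7d92ec5c0a2ecd17a622cbf7aed \
    8154951e54cf4752a7791bfae09c0946399b32db -- \
    llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h \
    llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp \
    llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h \
    llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp \
    llvm/utils/TableGen/X86DisassemblerTables.cpp \
    llvm/utils/TableGen/X86RecognizableInstr.cpp \
    llvm/utils/TableGen/X86RecognizableInstr.h \
    > clang-format.patch
git apply clang-format.patch  # inspect the result with `git diff` before committing
``````````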

<details>
<summary>
View the diff from clang-format here.
</summary>

``````````diff
diff --git a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
index f9089c4755..c163abe952 100644
--- a/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
+++ b/llvm/include/llvm/Support/X86DisassemblerDecoderCommon.h
@@ -53,22 +53,22 @@ namespace X86Disassembler {
 // processed correctly.  Most of these indicate the presence of particular
 // prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
 enum attributeBits {
-  ATTR_NONE   = 0x00,
-  ATTR_64BIT  = 0x1 << 0,
-  ATTR_XS     = 0x1 << 1,
-  ATTR_XD     = 0x1 << 2,
-  ATTR_REXW   = 0x1 << 3,
+  ATTR_NONE = 0x00,
+  ATTR_64BIT = 0x1 << 0,
+  ATTR_XS = 0x1 << 1,
+  ATTR_XD = 0x1 << 2,
+  ATTR_REXW = 0x1 << 3,
   ATTR_OPSIZE = 0x1 << 4,
   ATTR_ADSIZE = 0x1 << 5,
-  ATTR_VEX    = 0x1 << 6,
-  ATTR_VEXL   = 0x1 << 7,
-  ATTR_EVEX   = 0x1 << 8,
+  ATTR_VEX = 0x1 << 6,
+  ATTR_VEXL = 0x1 << 7,
+  ATTR_EVEX = 0x1 << 8,
   ATTR_EVEXL2 = 0x1 << 9,
-  ATTR_EVEXK  = 0x1 << 10,
+  ATTR_EVEXK = 0x1 << 10,
   ATTR_EVEXKZ = 0x1 << 11,
-  ATTR_EVEXB  = 0x1 << 12,
-  ATTR_REX2   = 0x1 << 13,
-  ATTR_max    = 0x1 << 14,
+  ATTR_EVEXB = 0x1 << 12,
+  ATTR_REX2 = 0x1 << 13,
+  ATTR_max = 0x1 << 14,
 };
 
 // Combinations of the above attributes that are relevant to instruction
@@ -77,209 +77,255 @@ enum attributeBits {
 
 //           Class name           Rank  Rationale for rank assignment
 #define INSTRUCTION_CONTEXTS                                                   \
-  ENUM_ENTRY(IC,                    0,  "says nothing about the instruction")  \
-  ENUM_ENTRY(IC_64BIT,              1,  "says the instruction applies in "     \
-                                        "64-bit mode but no more")             \
-  ENUM_ENTRY(IC_OPSIZE,             3,  "requires an OPSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_ADSIZE,             3,  "requires an ADSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_OPSIZE_ADSIZE,      4,  "requires ADSIZE and OPSIZE prefixes") \
-  ENUM_ENTRY(IC_XD,                 2,  "may say something about the opcode "  \
-                                        "but not the operands")                \
-  ENUM_ENTRY(IC_XS,                 2,  "may say something about the opcode "  \
-                                        "but not the operands")                \
-  ENUM_ENTRY(IC_XD_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_XS_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_XD_ADSIZE,          3,  "requires an ADSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_XS_ADSIZE,          3,  "requires an ADSIZE prefix, so "       \
-                                        "operands change width")               \
-  ENUM_ENTRY(IC_64BIT_REXW,         5,  "requires a REX.W prefix, so operands "\
-                                        "change width; overrides IC_OPSIZE")   \
-  ENUM_ENTRY(IC_64BIT_REXW_ADSIZE,  6,  "requires a REX.W prefix and 0x67 "    \
-                                        "prefix")                              \
-  ENUM_ENTRY(IC_64BIT_OPSIZE,       3,  "Just as meaningful as IC_OPSIZE")     \
-  ENUM_ENTRY(IC_64BIT_ADSIZE,       3,  "Just as meaningful as IC_ADSIZE")     \
-  ENUM_ENTRY(IC_64BIT_OPSIZE_ADSIZE, 4, "Just as meaningful as IC_OPSIZE/"     \
-                                        "IC_ADSIZE")                           \
-  ENUM_ENTRY(IC_64BIT_XD,           6,  "XD instructions are SSE; REX.W is "   \
-                                        "secondary")                           \
-  ENUM_ENTRY(IC_64BIT_XS,           6,  "Just as meaningful as IC_64BIT_XD")   \
-  ENUM_ENTRY(IC_64BIT_XD_OPSIZE,    3,  "Just as meaningful as IC_XD_OPSIZE")  \
-  ENUM_ENTRY(IC_64BIT_XS_OPSIZE,    3,  "Just as meaningful as IC_XS_OPSIZE")  \
-  ENUM_ENTRY(IC_64BIT_XD_ADSIZE,    3,  "Just as meaningful as IC_XD_ADSIZE")  \
-  ENUM_ENTRY(IC_64BIT_XS_ADSIZE,    3,  "Just as meaningful as IC_XS_ADSIZE")  \
-  ENUM_ENTRY(IC_64BIT_REXW_XS,      7,  "OPSIZE could mean a different "       \
-                                        "opcode")                              \
-  ENUM_ENTRY(IC_64BIT_REXW_XD,      7,  "Just as meaningful as "               \
-                                        "IC_64BIT_REXW_XS")                    \
-  ENUM_ENTRY(IC_64BIT_REXW_OPSIZE,  8,  "The Dynamic Duo!  Prefer over all "   \
-                                        "else because this changes most "      \
-                                        "operands' meaning")                   \
-  ENUM_ENTRY(IC_64BIT_REX2,         2,  "requires a REX2 prefix")              \
-  ENUM_ENTRY(IC_VEX,                1,  "requires a VEX prefix")               \
-  ENUM_ENTRY(IC_VEX_XS,             2,  "requires VEX and the XS prefix")      \
-  ENUM_ENTRY(IC_VEX_XD,             2,  "requires VEX and the XD prefix")      \
-  ENUM_ENTRY(IC_VEX_OPSIZE,         2,  "requires VEX and the OpSize prefix")  \
-  ENUM_ENTRY(IC_VEX_W,              3,  "requires VEX and the W prefix")       \
-  ENUM_ENTRY(IC_VEX_W_XS,           4,  "requires VEX, W, and XS prefix")      \
-  ENUM_ENTRY(IC_VEX_W_XD,           4,  "requires VEX, W, and XD prefix")      \
-  ENUM_ENTRY(IC_VEX_W_OPSIZE,       4,  "requires VEX, W, and OpSize")         \
-  ENUM_ENTRY(IC_VEX_L,              3,  "requires VEX and the L prefix")       \
-  ENUM_ENTRY(IC_VEX_L_XS,           4,  "requires VEX and the L and XS prefix")\
-  ENUM_ENTRY(IC_VEX_L_XD,           4,  "requires VEX and the L and XD prefix")\
-  ENUM_ENTRY(IC_VEX_L_OPSIZE,       4,  "requires VEX, L, and OpSize")         \
-  ENUM_ENTRY(IC_VEX_L_W,            4,  "requires VEX, L and W")               \
-  ENUM_ENTRY(IC_VEX_L_W_XS,         5,  "requires VEX, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_VEX_L_W_XD,         5,  "requires VEX, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_VEX_L_W_OPSIZE,     5,  "requires VEX, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX,               1,  "requires an EVEX prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS,            2,  "requires EVEX and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD,            2,  "requires EVEX and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE,        2,  "requires EVEX and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W,             3,  "requires EVEX and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS,          4,  "requires EVEX, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD,          4,  "requires EVEX, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE,      4,  "requires EVEX, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L,             3,  "requires EVEX and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS,          4,  "requires EVEX and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD,          4,  "requires EVEX and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE,      4,  "requires EVEX, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W,           3,  "requires EVEX, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS,        4,  "requires EVEX, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD,        4,  "requires EVEX, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE,    4,  "requires EVEX, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2,            3,  "requires EVEX and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS,         4,  "requires EVEX and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD,         4,  "requires EVEX and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE,     4,  "requires EVEX, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W,          3,  "requires EVEX, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS,       4,  "requires EVEX, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD,       4,  "requires EVEX, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE,   4,  "requires EVEX, L2, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_K,             1,  "requires an EVEX_K prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS_K,          2,  "requires EVEX_K and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD_K,          2,  "requires EVEX_K and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE_K,      2,  "requires EVEX_K and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W_K,           3,  "requires EVEX_K and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS_K,        4,  "requires EVEX_K, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD_K,        4,  "requires EVEX_K, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K,    4,  "requires EVEX_K, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L_K,           3,  "requires EVEX_K and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS_K,        4,  "requires EVEX_K and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD_K,        4,  "requires EVEX_K and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K,    4,  "requires EVEX_K, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W_K,         3,  "requires EVEX_K, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS_K,      4,  "requires EVEX_K, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD_K,      4,  "requires EVEX_K, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K,  4,  "requires EVEX_K, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2_K,          3,  "requires EVEX_K and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS_K,       4,  "requires EVEX_K and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD_K,       4,  "requires EVEX_K and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K,   4,  "requires EVEX_K, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W_K,        3,  "requires EVEX_K, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS_K,     4,  "requires EVEX_K, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD_K,     4,  "requires EVEX_K, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4,  "requires EVEX_K, L2, W and OpSize")     \
-  ENUM_ENTRY(IC_EVEX_B,             1,  "requires an EVEX_B prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS_B,          2,  "requires EVEX_B and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD_B,          2,  "requires EVEX_B and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE_B,      2,  "requires EVEX_B and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W_B,           3,  "requires EVEX_B and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS_B,        4,  "requires EVEX_B, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD_B,        4,  "requires EVEX_B, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE_B,    4,  "requires EVEX_B, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L_B,           3,  "requires EVEX_B and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS_B,        4,  "requires EVEX_B and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD_B,        4,  "requires EVEX_B and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE_B,    4,  "requires EVEX_B, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W_B,         3,  "requires EVEX_B, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS_B,      4,  "requires EVEX_B, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD_B,      4,  "requires EVEX_B, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B,  4,  "requires EVEX_B, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2_B,          3,  "requires EVEX_B and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS_B,       4,  "requires EVEX_B and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD_B,       4,  "requires EVEX_B and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B,   4,  "requires EVEX_B, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W_B,        3,  "requires EVEX_B, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS_B,     4,  "requires EVEX_B, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD_B,     4,  "requires EVEX_B, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4,  "requires EVEX_B, L2, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_K_B,           1,  "requires EVEX_B and EVEX_K prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS_K_B,        2,  "requires EVEX_B, EVEX_K and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD_K_B,        2,  "requires EVEX_B, EVEX_K and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE_K_B,    2,  "requires EVEX_B, EVEX_K and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W_K_B,         3,  "requires EVEX_B, EVEX_K and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L_K_B,         3,  "requires EVEX_B, EVEX_K and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W_K_B,       3,  "requires EVEX_B, EVEX_K, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2_K_B,        3,  "requires EVEX_B, EVEX_K and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4,  "requires EVEX_B, EVEX_K, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W_K_B,      3,  "requires EVEX_B, EVEX_K, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L2, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_KZ_B,           1,  "requires EVEX_B and EVEX_KZ prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B,    2,  "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L_KZ_B,           3,  "requires EVEX_B, EVEX_KZ and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ_B,    4,  "requires EVEX_B, EVEX_KZ, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2_KZ_B,          3,  "requires EVEX_B, EVEX_KZ and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ_B,   4,  "requires EVEX_B, EVEX_KZ, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W_KZ_B,        3,  "requires EVEX_B, EVEX_KZ, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ_B, 4,  "requires EVEX_B, EVEX_KZ, L2, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_KZ,             1,  "requires an EVEX_KZ prefix")             \
-  ENUM_ENTRY(IC_EVEX_XS_KZ,          2,  "requires EVEX_KZ and the XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_XD_KZ,          2,  "requires EVEX_KZ and the XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ,      2,  "requires EVEX_KZ and the OpSize prefix") \
-  ENUM_ENTRY(IC_EVEX_W_KZ,           3,  "requires EVEX_KZ and the W prefix")      \
-  ENUM_ENTRY(IC_EVEX_W_XS_KZ,        4,  "requires EVEX_KZ, W, and XS prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_XD_KZ,        4,  "requires EVEX_KZ, W, and XD prefix")     \
-  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ,    4,  "requires EVEX_KZ, W, and OpSize")        \
-  ENUM_ENTRY(IC_EVEX_L_KZ,           3,  "requires EVEX_KZ and the L prefix")       \
-  ENUM_ENTRY(IC_EVEX_L_XS_KZ,        4,  "requires EVEX_KZ and the L and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L_XD_KZ,        4,  "requires EVEX_KZ and the L and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ,    4,  "requires EVEX_KZ, L, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L_W_KZ,         3,  "requires EVEX_KZ, L and W")               \
-  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ,      4,  "requires EVEX_KZ, L, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ,      4,  "requires EVEX_KZ, L, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ,  4,  "requires EVEX_KZ, L, W and OpSize")       \
-  ENUM_ENTRY(IC_EVEX_L2_KZ,          3,  "requires EVEX_KZ and the L2 prefix")       \
-  ENUM_ENTRY(IC_EVEX_L2_XS_KZ,       4,  "requires EVEX_KZ and the L2 and XS prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_XD_KZ,       4,  "requires EVEX_KZ and the L2 and XD prefix")\
-  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ,   4,  "requires EVEX_KZ, L2, and OpSize")         \
-  ENUM_ENTRY(IC_EVEX_L2_W_KZ,        3,  "requires EVEX_KZ, L2 and W")               \
-  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ,     4,  "requires EVEX_KZ, L2, W and XS prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ,     4,  "requires EVEX_KZ, L2, W and XD prefix")    \
-  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4,  "requires EVEX_KZ, L2, W and OpSize")
+  ENUM_ENTRY(IC, 0, "says nothing about the instruction")                      \
+  ENUM_ENTRY(IC_64BIT, 1,                                                      \
+             "says the instruction applies in "                                \
+             "64-bit mode but no more")                                        \
+  ENUM_ENTRY(IC_OPSIZE, 3,                                                     \
+             "requires an OPSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_ADSIZE, 3,                                                     \
+             "requires an ADSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_OPSIZE_ADSIZE, 4, "requires ADSIZE and OPSIZE prefixes")       \
+  ENUM_ENTRY(IC_XD, 2,                                                         \
+             "may say something about the opcode "                             \
+             "but not the operands")                                           \
+  ENUM_ENTRY(IC_XS, 2,                                                         \
+             "may say something about the opcode "                             \
+             "but not the operands")                                           \
+  ENUM_ENTRY(IC_XD_OPSIZE, 3,                                                  \
+             "requires an OPSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_XS_OPSIZE, 3,                                                  \
+             "requires an OPSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_XD_ADSIZE, 3,                                                  \
+             "requires an ADSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_XS_ADSIZE, 3,                                                  \
+             "requires an ADSIZE prefix, so "                                  \
+             "operands change width")                                          \
+  ENUM_ENTRY(IC_64BIT_REXW, 5,                                                 \
+             "requires a REX.W prefix, so operands "                           \
+             "change width; overrides IC_OPSIZE")                              \
+  ENUM_ENTRY(IC_64BIT_REXW_ADSIZE, 6,                                          \
+             "requires a REX.W prefix and 0x67 "                               \
+             "prefix")                                                         \
+  ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE")            \
+  ENUM_ENTRY(IC_64BIT_ADSIZE, 3, "Just as meaningful as IC_ADSIZE")            \
+  ENUM_ENTRY(IC_64BIT_OPSIZE_ADSIZE, 4,                                        \
+             "Just as meaningful as IC_OPSIZE/"                                \
+             "IC_ADSIZE")                                                      \
+  ENUM_ENTRY(IC_64BIT_XD, 6,                                                   \
+             "XD instructions are SSE; REX.W is "                              \
+             "secondary")                                                      \
+  ENUM_ENTRY(IC_64BIT_XS, 6, "Just as meaningful as IC_64BIT_XD")              \
+  ENUM_ENTRY(IC_64BIT_XD_OPSIZE, 3, "Just as meaningful as IC_XD_OPSIZE")      \
+  ENUM_ENTRY(IC_64BIT_XS_OPSIZE, 3, "Just as meaningful as IC_XS_OPSIZE")      \
+  ENUM_ENTRY(IC_64BIT_XD_ADSIZE, 3, "Just as meaningful as IC_XD_ADSIZE")      \
+  ENUM_ENTRY(IC_64BIT_XS_ADSIZE, 3, "Just as meaningful as IC_XS_ADSIZE")      \
+  ENUM_ENTRY(IC_64BIT_REXW_XS, 7,                                              \
+             "OPSIZE could mean a different "                                  \
+             "opcode")                                                         \
+  ENUM_ENTRY(IC_64BIT_REXW_XD, 7,                                              \
+             "Just as meaningful as "                                          \
+             "IC_64BIT_REXW_XS")                                               \
+  ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 8,                                          \
+             "The Dynamic Duo!  Prefer over all "                              \
+             "else because this changes most "                                 \
+             "operands' meaning")                                              \
+  ENUM_ENTRY(IC_64BIT_REX2, 2, "requires a REX2 prefix")                       \
+  ENUM_ENTRY(IC_VEX, 1, "requires a VEX prefix")                               \
+  ENUM_ENTRY(IC_VEX_XS, 2, "requires VEX and the XS prefix")                   \
+  ENUM_ENTRY(IC_VEX_XD, 2, "requires VEX and the XD prefix")                   \
+  ENUM_ENTRY(IC_VEX_OPSIZE, 2, "requires VEX and the OpSize prefix")           \
+  ENUM_ENTRY(IC_VEX_W, 3, "requires VEX and the W prefix")                     \
+  ENUM_ENTRY(IC_VEX_W_XS, 4, "requires VEX, W, and XS prefix")                 \
+  ENUM_ENTRY(IC_VEX_W_XD, 4, "requires VEX, W, and XD prefix")                 \
+  ENUM_ENTRY(IC_VEX_W_OPSIZE, 4, "requires VEX, W, and OpSize")                \
+  ENUM_ENTRY(IC_VEX_L, 3, "requires VEX and the L prefix")                     \
+  ENUM_ENTRY(IC_VEX_L_XS, 4, "requires VEX and the L and XS prefix")           \
+  ENUM_ENTRY(IC_VEX_L_XD, 4, "requires VEX and the L and XD prefix")           \
+  ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize")                \
+  ENUM_ENTRY(IC_VEX_L_W, 4, "requires VEX, L and W")                           \
+  ENUM_ENTRY(IC_VEX_L_W_XS, 5, "requires VEX, L, W and XS prefix")             \
+  ENUM_ENTRY(IC_VEX_L_W_XD, 5, "requires VEX, L, W and XD prefix")             \
+  ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize")            \
+  ENUM_ENTRY(IC_EVEX, 1, "requires an EVEX prefix")                            \
+  ENUM_ENTRY(IC_EVEX_XS, 2, "requires EVEX and the XS prefix")                 \
+  ENUM_ENTRY(IC_EVEX_XD, 2, "requires EVEX and the XD prefix")                 \
+  ENUM_ENTRY(IC_EVEX_OPSIZE, 2, "requires EVEX and the OpSize prefix")         \
+  ENUM_ENTRY(IC_EVEX_W, 3, "requires EVEX and the W prefix")                   \
+  ENUM_ENTRY(IC_EVEX_W_XS, 4, "requires EVEX, W, and XS prefix")               \
+  ENUM_ENTRY(IC_EVEX_W_XD, 4, "requires EVEX, W, and XD prefix")               \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE, 4, "requires EVEX, W, and OpSize")              \
+  ENUM_ENTRY(IC_EVEX_L, 3, "requires EVEX and the L prefix")                   \
+  ENUM_ENTRY(IC_EVEX_L_XS, 4, "requires EVEX and the L and XS prefix")         \
+  ENUM_ENTRY(IC_EVEX_L_XD, 4, "requires EVEX and the L and XD prefix")         \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE, 4, "requires EVEX, L, and OpSize")              \
+  ENUM_ENTRY(IC_EVEX_L_W, 3, "requires EVEX, L and W")                         \
+  ENUM_ENTRY(IC_EVEX_L_W_XS, 4, "requires EVEX, L, W and XS prefix")           \
+  ENUM_ENTRY(IC_EVEX_L_W_XD, 4, "requires EVEX, L, W and XD prefix")           \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE, 4, "requires EVEX, L, W and OpSize")          \
+  ENUM_ENTRY(IC_EVEX_L2, 3, "requires EVEX and the L2 prefix")                 \
+  ENUM_ENTRY(IC_EVEX_L2_XS, 4, "requires EVEX and the L2 and XS prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_XD, 4, "requires EVEX and the L2 and XD prefix")       \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE, 4, "requires EVEX, L2, and OpSize")            \
+  ENUM_ENTRY(IC_EVEX_L2_W, 3, "requires EVEX, L2 and W")                       \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS, 4, "requires EVEX, L2, W and XS prefix")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD, 4, "requires EVEX, L2, W and XD prefix")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE, 4, "requires EVEX, L2, W and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_K, 1, "requires an EVEX_K prefix")                        \
+  ENUM_ENTRY(IC_EVEX_XS_K, 2, "requires EVEX_K and the XS prefix")             \
+  ENUM_ENTRY(IC_EVEX_XD_K, 2, "requires EVEX_K and the XD prefix")             \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_K, 2, "requires EVEX_K and the OpSize prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_K, 3, "requires EVEX_K and the W prefix")               \
+  ENUM_ENTRY(IC_EVEX_W_XS_K, 4, "requires EVEX_K, W, and XS prefix")           \
+  ENUM_ENTRY(IC_EVEX_W_XD_K, 4, "requires EVEX_K, W, and XD prefix")           \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K, 4, "requires EVEX_K, W, and OpSize")          \
+  ENUM_ENTRY(IC_EVEX_L_K, 3, "requires EVEX_K and the L prefix")               \
+  ENUM_ENTRY(IC_EVEX_L_XS_K, 4, "requires EVEX_K and the L and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_XD_K, 4, "requires EVEX_K and the L and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K, 4, "requires EVEX_K, L, and OpSize")          \
+  ENUM_ENTRY(IC_EVEX_L_W_K, 3, "requires EVEX_K, L and W")                     \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_K, 4, "requires EVEX_K, L, W and XS prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_K, 4, "requires EVEX_K, L, W and XD prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K, 4, "requires EVEX_K, L, W and OpSize")      \
+  ENUM_ENTRY(IC_EVEX_L2_K, 3, "requires EVEX_K and the L2 prefix")             \
+  ENUM_ENTRY(IC_EVEX_L2_XS_K, 4, "requires EVEX_K and the L2 and XS prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_XD_K, 4, "requires EVEX_K and the L2 and XD prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K, 4, "requires EVEX_K, L2, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L2_W_K, 3, "requires EVEX_K, L2 and W")                   \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_K, 4, "requires EVEX_K, L2, W and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_K, 4, "requires EVEX_K, L2, W and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4, "requires EVEX_K, L2, W and OpSize")    \
+  ENUM_ENTRY(IC_EVEX_B, 1, "requires an EVEX_B prefix")                        \
+  ENUM_ENTRY(IC_EVEX_XS_B, 2, "requires EVEX_B and the XS prefix")             \
+  ENUM_ENTRY(IC_EVEX_XD_B, 2, "requires EVEX_B and the XD prefix")             \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_B, 2, "requires EVEX_B and the OpSize prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_B, 3, "requires EVEX_B and the W prefix")               \
+  ENUM_ENTRY(IC_EVEX_W_XS_B, 4, "requires EVEX_B, W, and XS prefix")           \
+  ENUM_ENTRY(IC_EVEX_W_XD_B, 4, "requires EVEX_B, W, and XD prefix")           \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_B, 4, "requires EVEX_B, W, and OpSize")          \
+  ENUM_ENTRY(IC_EVEX_L_B, 3, "requires EVEX_B and the L prefix")               \
+  ENUM_ENTRY(IC_EVEX_L_XS_B, 4, "requires EVEX_B and the L and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_XD_B, 4, "requires EVEX_B and the L and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_B, 4, "requires EVEX_B, L, and OpSize")          \
+  ENUM_ENTRY(IC_EVEX_L_W_B, 3, "requires EVEX_B, L and W")                     \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_B, 4, "requires EVEX_B, L, W and XS prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_B, 4, "requires EVEX_B, L, W and XD prefix")       \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B, 4, "requires EVEX_B, L, W and OpSize")      \
+  ENUM_ENTRY(IC_EVEX_L2_B, 3, "requires EVEX_B and the L2 prefix")             \
+  ENUM_ENTRY(IC_EVEX_L2_XS_B, 4, "requires EVEX_B and the L2 and XS prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_XD_B, 4, "requires EVEX_B and the L2 and XD prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B, 4, "requires EVEX_B, L2, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L2_W_B, 3, "requires EVEX_B, L2 and W")                   \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_B, 4, "requires EVEX_B, L2, W and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_B, 4, "requires EVEX_B, L2, W and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4, "requires EVEX_B, L2, W and OpSize")    \
+  ENUM_ENTRY(IC_EVEX_K_B, 1, "requires EVEX_B and EVEX_K prefix")              \
+  ENUM_ENTRY(IC_EVEX_XS_K_B, 2, "requires EVEX_B, EVEX_K and the XS prefix")   \
+  ENUM_ENTRY(IC_EVEX_XD_K_B, 2, "requires EVEX_B, EVEX_K and the XD prefix")   \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_K_B, 2,                                            \
+             "requires EVEX_B, EVEX_K and the OpSize prefix")                  \
+  ENUM_ENTRY(IC_EVEX_W_K_B, 3, "requires EVEX_B, EVEX_K and the W prefix")     \
+  ENUM_ENTRY(IC_EVEX_W_XS_K_B, 4, "requires EVEX_B, EVEX_K, W, and XS prefix") \
+  ENUM_ENTRY(IC_EVEX_W_XD_K_B, 4, "requires EVEX_B, EVEX_K, W, and XD prefix") \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B, 4,                                          \
+             "requires EVEX_B, EVEX_K, W, and OpSize")                         \
+  ENUM_ENTRY(IC_EVEX_L_K_B, 3, "requires EVEX_B, EVEX_K and the L prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_XS_K_B, 4,                                              \
+             "requires EVEX_B, EVEX_K and the L and XS prefix")                \
+  ENUM_ENTRY(IC_EVEX_L_XD_K_B, 4,                                              \
+             "requires EVEX_B, EVEX_K and the L and XD prefix")                \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B, 4,                                          \
+             "requires EVEX_B, EVEX_K, L, and OpSize")                         \
+  ENUM_ENTRY(IC_EVEX_L_W_K_B, 3, "requires EVEX_B, EVEX_K, L and W")           \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_K_B, 4,                                            \
+             "requires EVEX_B, EVEX_K, L, W and XS prefix")                    \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_K_B, 4,                                            \
+             "requires EVEX_B, EVEX_K, L, W and XD prefix")                    \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B, 4,                                        \
+             "requires EVEX_B, EVEX_K, L, W and OpSize")                       \
+  ENUM_ENTRY(IC_EVEX_L2_K_B, 3, "requires EVEX_B, EVEX_K and the L2 prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_XS_K_B, 4,                                             \
+             "requires EVEX_B, EVEX_K and the L2 and XS prefix")               \
+  ENUM_ENTRY(IC_EVEX_L2_XD_K_B, 4,                                             \
+             "requires EVEX_B, EVEX_K and the L2 and XD prefix")               \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4,                                         \
+             "requires EVEX_B, EVEX_K, L2, and OpSize")                        \
+  ENUM_ENTRY(IC_EVEX_L2_W_K_B, 3, "requires EVEX_B, EVEX_K, L2 and W")         \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B, 4,                                           \
+             "requires EVEX_B, EVEX_K, L2, W and XS prefix")                   \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B, 4,                                           \
+             "requires EVEX_B, EVEX_K, L2, W and XD prefix")                   \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B, 4,                                       \
+             "requires EVEX_B, EVEX_K, L2, W and OpSize")                      \
+  ENUM_ENTRY(IC_EVEX_KZ_B, 1, "requires EVEX_B and EVEX_KZ prefix")            \
+  ENUM_ENTRY(IC_EVEX_XS_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XS prefix") \
+  ENUM_ENTRY(IC_EVEX_XD_KZ_B, 2, "requires EVEX_B, EVEX_KZ and the XD prefix") \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B, 2,                                           \
+             "requires EVEX_B, EVEX_KZ and the OpSize prefix")                 \
+  ENUM_ENTRY(IC_EVEX_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the W prefix")   \
+  ENUM_ENTRY(IC_EVEX_W_XS_KZ_B, 4,                                             \
+             "requires EVEX_B, EVEX_KZ, W, and XS prefix")                     \
+  ENUM_ENTRY(IC_EVEX_W_XD_KZ_B, 4,                                             \
+             "requires EVEX_B, EVEX_KZ, W, and XD prefix")                     \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B, 4,                                         \
+             "requires EVEX_B, EVEX_KZ, W, and OpSize")                        \
+  ENUM_ENTRY(IC_EVEX_L_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the L prefix")   \
+  ENUM_ENTRY(IC_EVEX_L_XS_KZ_B, 4,                                             \
+             "requires EVEX_B, EVEX_KZ and the L and XS prefix")               \
+  ENUM_ENTRY(IC_EVEX_L_XD_KZ_B, 4,                                             \
+             "requires EVEX_B, EVEX_KZ and the L and XD prefix")               \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ_B, 4,                                         \
+             "requires EVEX_B, EVEX_KZ, L, and OpSize")                        \
+  ENUM_ENTRY(IC_EVEX_L_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ, L and W")         \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ_B, 4,                                           \
+             "requires EVEX_B, EVEX_KZ, L, W and XS prefix")                   \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ_B, 4,                                           \
+             "requires EVEX_B, EVEX_KZ, L, W and XD prefix")                   \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ_B, 4,                                       \
+             "requires EVEX_B, EVEX_KZ, L, W and OpSize")                      \
+  ENUM_ENTRY(IC_EVEX_L2_KZ_B, 3, "requires EVEX_B, EVEX_KZ and the L2 prefix") \
+  ENUM_ENTRY(IC_EVEX_L2_XS_KZ_B, 4,                                            \
+             "requires EVEX_B, EVEX_KZ and the L2 and XS prefix")              \
+  ENUM_ENTRY(IC_EVEX_L2_XD_KZ_B, 4,                                            \
+             "requires EVEX_B, EVEX_KZ and the L2 and XD prefix")              \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ_B, 4,                                        \
+             "requires EVEX_B, EVEX_KZ, L2, and OpSize")                       \
+  ENUM_ENTRY(IC_EVEX_L2_W_KZ_B, 3, "requires EVEX_B, EVEX_KZ, L2 and W")       \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ_B, 4,                                          \
+             "requires EVEX_B, EVEX_KZ, L2, W and XS prefix")                  \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ_B, 4,                                          \
+             "requires EVEX_B, EVEX_KZ, L2, W and XD prefix")                  \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ_B, 4,                                      \
+             "requires EVEX_B, EVEX_KZ, L2, W and OpSize")                     \
+  ENUM_ENTRY(IC_EVEX_KZ, 1, "requires an EVEX_KZ prefix")                      \
+  ENUM_ENTRY(IC_EVEX_XS_KZ, 2, "requires EVEX_KZ and the XS prefix")           \
+  ENUM_ENTRY(IC_EVEX_XD_KZ, 2, "requires EVEX_KZ and the XD prefix")           \
+  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ, 2, "requires EVEX_KZ and the OpSize prefix")   \
+  ENUM_ENTRY(IC_EVEX_W_KZ, 3, "requires EVEX_KZ and the W prefix")             \
+  ENUM_ENTRY(IC_EVEX_W_XS_KZ, 4, "requires EVEX_KZ, W, and XS prefix")         \
+  ENUM_ENTRY(IC_EVEX_W_XD_KZ, 4, "requires EVEX_KZ, W, and XD prefix")         \
+  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ, 4, "requires EVEX_KZ, W, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_KZ, 3, "requires EVEX_KZ and the L prefix")             \
+  ENUM_ENTRY(IC_EVEX_L_XS_KZ, 4, "requires EVEX_KZ and the L and XS prefix")   \
+  ENUM_ENTRY(IC_EVEX_L_XD_KZ, 4, "requires EVEX_KZ and the L and XD prefix")   \
+  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ, 4, "requires EVEX_KZ, L, and OpSize")        \
+  ENUM_ENTRY(IC_EVEX_L_W_KZ, 3, "requires EVEX_KZ, L and W")                   \
+  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ, 4, "requires EVEX_KZ, L, W and XS prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ, 4, "requires EVEX_KZ, L, W and XD prefix")     \
+  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L, W and OpSize")    \
+  ENUM_ENTRY(IC_EVEX_L2_KZ, 3, "requires EVEX_KZ and the L2 prefix")           \
+  ENUM_ENTRY(IC_EVEX_L2_XS_KZ, 4, "requires EVEX_KZ and the L2 and XS prefix") \
+  ENUM_ENTRY(IC_EVEX_L2_XD_KZ, 4, "requires EVEX_KZ and the L2 and XD prefix") \
+  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, and OpSize")      \
+  ENUM_ENTRY(IC_EVEX_L2_W_KZ, 3, "requires EVEX_KZ, L2 and W")                 \
+  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ, 4, "requires EVEX_KZ, L2, W and XS prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ, 4, "requires EVEX_KZ, L2, W and XD prefix")   \
+  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4, "requires EVEX_KZ, L2, W and OpSize")
 
 #define ENUM_ENTRY(n, r, d) n,
 enum InstructionContext {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index f6ae7c6fa7..6eb6fb5b3b 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -584,32 +584,32 @@ namespace X86II {
     // PseudoFrm - This represents an instruction that is a pseudo instruction
     // or one that has not been implemented yet.  It is illegal to code generate
     // it, but tolerated for intermediate implementation stages.
-    Pseudo         = 0,
+    Pseudo = 0,
 
     /// Raw - This form is for instructions that don't have any operands, so
     /// they are just a fixed opcode value, like 'leave'.
-    RawFrm         = 1,
+    RawFrm = 1,
 
     /// AddRegFrm - This form is used for instructions like 'push r32' that have
     /// their one register operand added to their opcode.
-    AddRegFrm      = 2,
+    AddRegFrm = 2,
 
     /// RawFrmMemOffs - This form is for instructions that store an absolute
     /// memory offset as an immediate with a possible segment override.
-    RawFrmMemOffs  = 3,
+    RawFrmMemOffs = 3,
 
     /// RawFrmSrc - This form is for instructions that use the source index
     /// register SI/ESI/RSI with a possible segment override.
-    RawFrmSrc      = 4,
+    RawFrmSrc = 4,
 
     /// RawFrmDst - This form is for instructions that use the destination index
     /// register DI/EDI/RDI.
-    RawFrmDst      = 5,
+    RawFrmDst = 5,
 
     /// RawFrmDstSrc - This form is for instructions that use the source index
     /// register SI/ESI/RSI with a possible segment override, and also the
     /// destination index register DI/EDI/RDI.
-    RawFrmDstSrc   = 6,
+    RawFrmDstSrc = 6,
 
     /// RawFrmImm8 - This is used for the ENTER instruction, which has two
     /// immediates, the first of which is a 16-bit immediate (specified by
@@ -630,7 +630,8 @@ namespace X86II {
     /// byte like data16 or rep.
     PrefixByte = 10,
 
-    /// MRMDestMem4VOp3CC - This form is used for instructions that use the Mod/RM
+    /// MRMDestMem4VOp3CC - This form is used for instructions that use the
+    /// Mod/RM
     /// byte to specify a destination which in this case is memory and operand 3
     /// with VEX.VVVV, and also encodes a condition code.
     MRMDestMem4VOp3CC = 20,
@@ -644,7 +645,7 @@ namespace X86II {
     MRMr0 = 21,
 
     /// MRMSrcMem - But force to use the SIB field.
-    MRMSrcMemFSIB  = 22,
+    MRMSrcMemFSIB = 22,
 
     /// MRMDestMem - But force to use the SIB field.
     MRMDestMemFSIB = 23,
@@ -652,12 +653,12 @@ namespace X86II {
     /// MRMDestMem - This form is used for instructions that use the Mod/RM byte
     /// to specify a destination, which in this case is memory.
     ///
-    MRMDestMem     = 24,
+    MRMDestMem = 24,
 
     /// MRMSrcMem - This form is used for instructions that use the Mod/RM byte
     /// to specify a source, which in this case is memory.
     ///
-    MRMSrcMem      = 25,
+    MRMSrcMem = 25,
 
     /// MRMSrcMem4VOp3 - This form is used for instructions that encode
     /// operand 3 with VEX.VVVV and load from memory.
@@ -667,12 +668,12 @@ namespace X86II {
     /// MRMSrcMemOp4 - This form is used for instructions that use the Mod/RM
     /// byte to specify the fourth source, which in this case is memory.
     ///
-    MRMSrcMemOp4   = 27,
+    MRMSrcMemOp4 = 27,
 
     /// MRMSrcMemCC - This form is used for instructions that use the Mod/RM
     /// byte to specify the operands and also encodes a condition code.
     ///
-    MRMSrcMemCC    = 28,
+    MRMSrcMemCC = 28,
 
     /// MRMXm - This form is used for instructions that use the Mod/RM byte
     /// to specify a memory source, but doesn't use the middle field. And has
@@ -686,18 +687,24 @@ namespace X86II {
     MRMXm = 31,
 
     // Next, instructions that operate on a memory r/m operand...
-    MRM0m = 32,  MRM1m = 33,  MRM2m = 34,  MRM3m = 35, // Format /0 /1 /2 /3
-    MRM4m = 36,  MRM5m = 37,  MRM6m = 38,  MRM7m = 39, // Format /4 /5 /6 /7
+    MRM0m = 32,
+    MRM1m = 33,
+    MRM2m = 34,
+    MRM3m = 35, // Format /0 /1 /2 /3
+    MRM4m = 36,
+    MRM5m = 37,
+    MRM6m = 38,
+    MRM7m = 39, // Format /4 /5 /6 /7
 
     /// MRMDestReg - This form is used for instructions that use the Mod/RM byte
     /// to specify a destination, which in this case is a register.
     ///
-    MRMDestReg     = 40,
+    MRMDestReg = 40,
 
     /// MRMSrcReg - This form is used for instructions that use the Mod/RM byte
     /// to specify a source, which in this case is a register.
     ///
-    MRMSrcReg      = 41,
+    MRMSrcReg = 41,
 
     /// MRMSrcReg4VOp3 - This form is used for instructions that encode
     /// operand 3 with VEX.VVVV and do not load from memory.
@@ -707,12 +714,12 @@ namespace X86II {
     /// MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM
     /// byte to specify the fourth source, which in this case is a register.
     ///
-    MRMSrcRegOp4   = 43,
+    MRMSrcRegOp4 = 43,
 
     /// MRMSrcRegCC - This form is used for instructions that use the Mod/RM
     /// byte to specify the operands and also encodes a condition code
     ///
-    MRMSrcRegCC    = 44,
+    MRMSrcRegCC = 44,
 
     /// MRMXCCr - This form is used for instructions that use the Mod/RM byte
     /// to specify a register source, but doesn't use the middle field. And has
@@ -726,32 +733,92 @@ namespace X86II {
     MRMXr = 47,
 
     // Instructions that operate on a register r/m operand...
-    MRM0r = 48,  MRM1r = 49,  MRM2r = 50,  MRM3r = 51, // Format /0 /1 /2 /3
-    MRM4r = 52,  MRM5r = 53,  MRM6r = 54,  MRM7r = 55, // Format /4 /5 /6 /7
+    MRM0r = 48,
+    MRM1r = 49,
+    MRM2r = 50,
+    MRM3r = 51, // Format /0 /1 /2 /3
+    MRM4r = 52,
+    MRM5r = 53,
+    MRM6r = 54,
+    MRM7r = 55, // Format /4 /5 /6 /7
 
     // Instructions that operate that have mod=11 and an opcode but ignore r/m.
-    MRM0X = 56,  MRM1X = 57,  MRM2X = 58,  MRM3X = 59, // Format /0 /1 /2 /3
-    MRM4X = 60,  MRM5X = 61,  MRM6X = 62,  MRM7X = 63, // Format /4 /5 /6 /7
+    MRM0X = 56,
+    MRM1X = 57,
+    MRM2X = 58,
+    MRM3X = 59, // Format /0 /1 /2 /3
+    MRM4X = 60,
+    MRM5X = 61,
+    MRM6X = 62,
+    MRM7X = 63, // Format /4 /5 /6 /7
 
     /// MRM_XX - A mod/rm byte of exactly 0xXX.
-    MRM_C0 = 64,  MRM_C1 = 65,  MRM_C2 = 66,  MRM_C3 = 67,
-    MRM_C4 = 68,  MRM_C5 = 69,  MRM_C6 = 70,  MRM_C7 = 71,
-    MRM_C8 = 72,  MRM_C9 = 73,  MRM_CA = 74,  MRM_CB = 75,
-    MRM_CC = 76,  MRM_CD = 77,  MRM_CE = 78,  MRM_CF = 79,
-    MRM_D0 = 80,  MRM_D1 = 81,  MRM_D2 = 82,  MRM_D3 = 83,
-    MRM_D4 = 84,  MRM_D5 = 85,  MRM_D6 = 86,  MRM_D7 = 87,
-    MRM_D8 = 88,  MRM_D9 = 89,  MRM_DA = 90,  MRM_DB = 91,
-    MRM_DC = 92,  MRM_DD = 93,  MRM_DE = 94,  MRM_DF = 95,
-    MRM_E0 = 96,  MRM_E1 = 97,  MRM_E2 = 98,  MRM_E3 = 99,
-    MRM_E4 = 100, MRM_E5 = 101, MRM_E6 = 102, MRM_E7 = 103,
-    MRM_E8 = 104, MRM_E9 = 105, MRM_EA = 106, MRM_EB = 107,
-    MRM_EC = 108, MRM_ED = 109, MRM_EE = 110, MRM_EF = 111,
-    MRM_F0 = 112, MRM_F1 = 113, MRM_F2 = 114, MRM_F3 = 115,
-    MRM_F4 = 116, MRM_F5 = 117, MRM_F6 = 118, MRM_F7 = 119,
-    MRM_F8 = 120, MRM_F9 = 121, MRM_FA = 122, MRM_FB = 123,
-    MRM_FC = 124, MRM_FD = 125, MRM_FE = 126, MRM_FF = 127,
-
-    FormMask       = 127,
+    MRM_C0 = 64,
+    MRM_C1 = 65,
+    MRM_C2 = 66,
+    MRM_C3 = 67,
+    MRM_C4 = 68,
+    MRM_C5 = 69,
+    MRM_C6 = 70,
+    MRM_C7 = 71,
+    MRM_C8 = 72,
+    MRM_C9 = 73,
+    MRM_CA = 74,
+    MRM_CB = 75,
+    MRM_CC = 76,
+    MRM_CD = 77,
+    MRM_CE = 78,
+    MRM_CF = 79,
+    MRM_D0 = 80,
+    MRM_D1 = 81,
+    MRM_D2 = 82,
+    MRM_D3 = 83,
+    MRM_D4 = 84,
+    MRM_D5 = 85,
+    MRM_D6 = 86,
+    MRM_D7 = 87,
+    MRM_D8 = 88,
+    MRM_D9 = 89,
+    MRM_DA = 90,
+    MRM_DB = 91,
+    MRM_DC = 92,
+    MRM_DD = 93,
+    MRM_DE = 94,
+    MRM_DF = 95,
+    MRM_E0 = 96,
+    MRM_E1 = 97,
+    MRM_E2 = 98,
+    MRM_E3 = 99,
+    MRM_E4 = 100,
+    MRM_E5 = 101,
+    MRM_E6 = 102,
+    MRM_E7 = 103,
+    MRM_E8 = 104,
+    MRM_E9 = 105,
+    MRM_EA = 106,
+    MRM_EB = 107,
+    MRM_EC = 108,
+    MRM_ED = 109,
+    MRM_EE = 110,
+    MRM_EF = 111,
+    MRM_F0 = 112,
+    MRM_F1 = 113,
+    MRM_F2 = 114,
+    MRM_F3 = 115,
+    MRM_F4 = 116,
+    MRM_F5 = 117,
+    MRM_F6 = 118,
+    MRM_F7 = 119,
+    MRM_F8 = 120,
+    MRM_F9 = 121,
+    MRM_FA = 122,
+    MRM_FB = 123,
+    MRM_FC = 124,
+    MRM_FD = 125,
+    MRM_FE = 126,
+    MRM_FF = 127,
+
+    FormMask = 127,
 
     //===------------------------------------------------------------------===//
     // Actual flags...
@@ -763,18 +830,18 @@ namespace X86II {
     OpSizeShift = 7,
     OpSizeMask = 0x3 << OpSizeShift,
 
-    OpSizeFixed  = 0 << OpSizeShift,
-    OpSize16     = 1 << OpSizeShift,
-    OpSize32     = 2 << OpSizeShift,
+    OpSizeFixed = 0 << OpSizeShift,
+    OpSize16 = 1 << OpSizeShift,
+    OpSize32 = 2 << OpSizeShift,
 
     // AsSize - AdSizeX implies this instruction determines its need of 0x67
     // prefix from a normal ModRM memory operand. The other types indicate that
     // an operand is encoded with a specific width and a prefix is needed if
     // it differs from the current mode.
     AdSizeShift = OpSizeShift + 2,
-    AdSizeMask  = 0x3 << AdSizeShift,
+    AdSizeMask = 0x3 << AdSizeShift,
 
-    AdSizeX  = 0 << AdSizeShift,
+    AdSizeX = 0 << AdSizeShift,
     AdSize16 = 1 << AdSizeShift,
     AdSize32 = 2 << AdSizeShift,
     AdSize64 = 3 << AdSizeShift,
@@ -785,7 +852,7 @@ namespace X86II {
     // no prefix.
     //
     OpPrefixShift = AdSizeShift + 2,
-    OpPrefixMask  = 0x3 << OpPrefixShift,
+    OpPrefixMask = 0x3 << OpPrefixShift,
 
     // PD - Prefix code for packed double precision vector floating point
     // operations performed in the SSE registers.
@@ -793,14 +860,15 @@ namespace X86II {
 
     // XS, XD - These prefix codes are for single and double precision scalar
     // floating point operations performed in the SSE registers.
-    XS = 2 << OpPrefixShift,  XD = 3 << OpPrefixShift,
+    XS = 2 << OpPrefixShift,
+    XD = 3 << OpPrefixShift,
 
     //===------------------------------------------------------------------===//
     // OpMap - This field determines which opcode map this instruction
     // belongs to. i.e. one-byte, two-byte, 0x0f 0x38, 0x0f 0x3a, etc.
     //
     OpMapShift = OpPrefixShift + 2,
-    OpMapMask  = 0xF << OpMapShift,
+    OpMapMask = 0xF << OpMapShift,
 
     // OB - OneByte - Set if this instruction has a one byte opcode.
     OB = 0 << OpMapShift,
@@ -810,7 +878,8 @@ namespace X86II {
     TB = 1 << OpMapShift,
 
     // T8, TA - Prefix after the 0x0F prefix.
-    T8 = 2 << OpMapShift,  TA = 3 << OpMapShift,
+    T8 = 2 << OpMapShift,
+    TA = 3 << OpMapShift,
 
     // XOP8 - Prefix to include use of imm byte.
     XOP8 = 4 << OpMapShift,
@@ -840,39 +909,39 @@ namespace X86II {
     // etc. We only cares about REX.W and REX.R bits and only the former is
     // statically determined.
     //
-    REXShift    = OpMapShift + 4,
-    REX_W       = 1 << REXShift,
+    REXShift = OpMapShift + 4,
+    REX_W = 1 << REXShift,
 
     //===------------------------------------------------------------------===//
     // This three-bit field describes the size of an immediate operand.  Zero is
     // unused so that we can tell if we forgot to set a value.
     ImmShift = REXShift + 1,
-    ImmMask    = 15 << ImmShift,
-    Imm8       = 1 << ImmShift,
-    Imm8PCRel  = 2 << ImmShift,
-    Imm8Reg    = 3 << ImmShift,
-    Imm16      = 4 << ImmShift,
+    ImmMask = 15 << ImmShift,
+    Imm8 = 1 << ImmShift,
+    Imm8PCRel = 2 << ImmShift,
+    Imm8Reg = 3 << ImmShift,
+    Imm16 = 4 << ImmShift,
     Imm16PCRel = 5 << ImmShift,
-    Imm32      = 6 << ImmShift,
+    Imm32 = 6 << ImmShift,
     Imm32PCRel = 7 << ImmShift,
-    Imm32S     = 8 << ImmShift,
-    Imm64      = 9 << ImmShift,
+    Imm32S = 8 << ImmShift,
+    Imm64 = 9 << ImmShift,
 
     //===------------------------------------------------------------------===//
     // FP Instruction Classification...  Zero is non-fp instruction.
 
     // FPTypeMask - Mask for all of the FP types...
     FPTypeShift = ImmShift + 4,
-    FPTypeMask  = 7 << FPTypeShift,
+    FPTypeMask = 7 << FPTypeShift,
 
     // NotFP - The default, set for instructions that do not use FP registers.
-    NotFP      = 0 << FPTypeShift,
+    NotFP = 0 << FPTypeShift,
 
     // ZeroArgFP - 0 arg FP instruction which implicitly pushes ST(0), f.e. fld0
-    ZeroArgFP  = 1 << FPTypeShift,
+    ZeroArgFP = 1 << FPTypeShift,
 
     // OneArgFP - 1 arg FP instructions which implicitly read ST(0), such as fst
-    OneArgFP   = 2 << FPTypeShift,
+    OneArgFP = 2 << FPTypeShift,
 
     // OneArgFPRW - 1 arg FP instruction which implicitly read ST(0) and write a
     // result back to ST(0).  For example, fcos, fsqrt, etc.
@@ -882,17 +951,17 @@ namespace X86II {
     // TwoArgFP - 2 arg FP instructions which implicitly read ST(0), and an
     // explicit argument, storing the result to either ST(0) or the implicit
     // argument.  For example: fadd, fsub, fmul, etc...
-    TwoArgFP   = 4 << FPTypeShift,
+    TwoArgFP = 4 << FPTypeShift,
 
     // CompareFP - 2 arg FP instructions which implicitly read ST(0) and an
     // explicit argument, but have no destination.  Example: fucom, fucomi, ...
-    CompareFP  = 5 << FPTypeShift,
+    CompareFP = 5 << FPTypeShift,
 
     // CondMovFP - "2 operand" floating point conditional move instructions.
-    CondMovFP  = 6 << FPTypeShift,
+    CondMovFP = 6 << FPTypeShift,
 
     // SpecialFP - Special instruction forms.  Dispatch by opcode explicitly.
-    SpecialFP  = 7 << FPTypeShift,
+    SpecialFP = 7 << FPTypeShift,
 
     // Lock prefix
     LOCKShift = FPTypeShift + 3,
@@ -923,36 +992,36 @@ namespace X86II {
     EVEX = 3 << EncodingShift,
 
     // Opcode
-    OpcodeShift   = EncodingShift + 2,
+    OpcodeShift = EncodingShift + 2,
 
     /// VEX_4V - Used to specify an additional AVX/SSE register. Several 2
     /// address instructions in SSE are represented as 3 address ones in AVX
     /// and the additional register is encoded in VEX_VVVV prefix.
     VEX_4VShift = OpcodeShift + 8,
-    VEX_4V      = 1ULL << VEX_4VShift,
+    VEX_4V = 1ULL << VEX_4VShift,
 
     /// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
     /// instruction uses 256-bit wide registers. This is usually auto detected
     /// if a VR256 register is used, but some AVX instructions also have this
     /// field marked when using a f256 memory references.
     VEX_LShift = VEX_4VShift + 1,
-    VEX_L       = 1ULL << VEX_LShift,
+    VEX_L = 1ULL << VEX_LShift,
 
     // EVEX_K - Set if this instruction requires masking
     EVEX_KShift = VEX_LShift + 1,
-    EVEX_K      = 1ULL << EVEX_KShift,
+    EVEX_K = 1ULL << EVEX_KShift,
 
     // EVEX_Z - Set if this instruction has EVEX.Z field set.
     EVEX_ZShift = EVEX_KShift + 1,
-    EVEX_Z      = 1ULL << EVEX_ZShift,
+    EVEX_Z = 1ULL << EVEX_ZShift,
 
     // EVEX_L2 - Set if this instruction has EVEX.L' field set.
     EVEX_L2Shift = EVEX_ZShift + 1,
-    EVEX_L2     = 1ULL << EVEX_L2Shift,
+    EVEX_L2 = 1ULL << EVEX_L2Shift,
 
     // EVEX_B - Set if this instruction has EVEX.B field set.
     EVEX_BShift = EVEX_L2Shift + 1,
-    EVEX_B      = 1ULL << EVEX_BShift,
+    EVEX_B = 1ULL << EVEX_BShift,
 
     // The scaling factor for the AVX512's 8-bit compressed displacement.
     CD8_Scale_Shift = EVEX_BShift + 1,
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index e8dffaa29c..ea27671a0a 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -908,8 +908,7 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, unsigned &i) const {
         if (index & ATTR_EVEXB)
           o << "_B";
       }
-    }
-    else if ((index & ATTR_64BIT) && (index & ATTR_REX2))
+    } else if ((index & ATTR_64BIT) && (index & ATTR_REX2))
       o << "IC_64BIT_REX2";
     else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XS))
       o << "IC_64BIT_REXW_XS";

``````````

</details>


https://github.com/llvm/llvm-project/pull/72835
