[llvm] Add option to turn off optimization for X86 assembler (PR #75895)

Yi Kong via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 18 21:57:05 PST 2023


https://github.com/kongy created https://github.com/llvm/llvm-project/pull/75895

There are use cases that require the assembler to produce exactly the instructions as written, without applying any optimizations.
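
For example, with this change the new flag can be passed to llvm-mc to keep a VEX3-encodable instruction in the form it was written. This is only a rough sketch based on the RUN lines and encodings in the updated test; the exact console formatting may differ:

  $ echo "vaddps %ymm12, %ymm4, %ymm6" | llvm-mc -triple x86_64-unknown-unknown --show-encoding
          vaddps  %ymm4, %ymm12, %ymm6            # encoding: [0xc5,0x9c,0x58,0xf4]
  $ echo "vaddps %ymm12, %ymm4, %ymm6" | llvm-mc -triple x86_64-unknown-unknown --show-encoding -x86-inline-asm-optimize=false
          vaddps  %ymm12, %ymm4, %ymm6            # encoding: [0xc4,0xc1,0x5c,0x58,0xf4]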

From 26fb5866e92de1670db78f714944ff82c46108eb Mon Sep 17 00:00:00 2001
From: Yi Kong <yikong at google.com>
Date: Tue, 19 Dec 2023 14:23:39 +0900
Subject: [PATCH] Add option to turn off optimization for X86 assembler

There are use cases that require the assembler to produce exactly the
instructions as written, without applying any optimizations.
---
 .../lib/Target/X86/AsmParser/X86AsmParser.cpp |  8 ++-
 llvm/test/MC/X86/avx-64-att.s                 | 54 ++++++++++++-------
 2 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 1d40ce35c1b416..5390dd94b760d8 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -48,6 +48,10 @@ static cl::opt<bool> LVIInlineAsmHardening(
     cl::desc("Harden inline assembly code that may be vulnerable to Load Value"
              " Injection (LVI). This feature is experimental."), cl::Hidden);
 
+static cl::opt<bool> AsmOptimize(
+    "x86-inline-asm-optimize", cl::init(true),
+    cl::desc("Optimize X86 inline assembly code."), cl::Hidden);
+
 static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
   if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
     ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
@@ -3670,11 +3674,11 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
 }
 
 bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
-  if (ForcedVEXEncoding != VEXEncoding_VEX3 &&
+  if (AsmOptimize && ForcedVEXEncoding != VEXEncoding_VEX3 &&
       X86::optimizeInstFromVEX3ToVEX2(Inst, MII.get(Inst.getOpcode())))
     return true;
 
-  if (X86::optimizeShiftRotateWithImmediateOne(Inst))
+  if (AsmOptimize && X86::optimizeShiftRotateWithImmediateOne(Inst))
     return true;
 
   switch (Inst.getOpcode()) {
diff --git a/llvm/test/MC/X86/avx-64-att.s b/llvm/test/MC/X86/avx-64-att.s
index 39ee048c3736d4..7cdd93891c94cb 100644
--- a/llvm/test/MC/X86/avx-64-att.s
+++ b/llvm/test/MC/X86/avx-64-att.s
@@ -1,4 +1,5 @@
-// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
+// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck --check-prefixes=CHECK,OPT %s
+// RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding -x86-inline-asm-optimize=false %s | FileCheck --check-prefixes=CHECK,NOOPT %s
 
 // CHECK: vaddss  %xmm8, %xmm9, %xmm10
 // CHECK:  encoding: [0xc4,0x41,0x32,0x58,0xd0]
@@ -3168,20 +3169,28 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0xc1,0x5d,0x5e,0xf4]
           vdivpd  %ymm12, %ymm4, %ymm6
 
-// CHECK: vaddps  %ymm4, %ymm12, %ymm6
-// CHECK: encoding: [0xc5,0x9c,0x58,0xf4]
+// OPT:   vaddps  %ymm4, %ymm12, %ymm6
+// OPT:   encoding: [0xc5,0x9c,0x58,0xf4]
+// NOOPT: vaddps  %ymm12, %ymm4, %ymm6
+// NOOPT: encoding: [0xc4,0xc1,0x5c,0x58,0xf4]
           vaddps  %ymm12, %ymm4, %ymm6
 
-// CHECK: vaddpd  %ymm4, %ymm12, %ymm6
-// CHECK: encoding: [0xc5,0x9d,0x58,0xf4]
+// OPT:   vaddpd  %ymm4, %ymm12, %ymm6
+// OPT:   encoding: [0xc5,0x9d,0x58,0xf4]
+// NOOPT: vaddpd  %ymm12, %ymm4, %ymm6
+// NOOPT: encoding: [0xc4,0xc1,0x5d,0x58,0xf4]
           vaddpd  %ymm12, %ymm4, %ymm6
 
-// CHECK: vmulps  %ymm4, %ymm12, %ymm6
-// CHECK: encoding: [0xc5,0x9c,0x59,0xf4]
+// OPT:   vmulps  %ymm4, %ymm12, %ymm6
+// OPT:   encoding: [0xc5,0x9c,0x59,0xf4]
+// NOOPT: vmulps  %ymm12, %ymm4, %ymm6
+// NOOPT: encoding: [0xc4,0xc1,0x5c,0x59,0xf4]
           vmulps  %ymm12, %ymm4, %ymm6
 
-// CHECK: vmulpd  %ymm4, %ymm12, %ymm6
-// CHECK: encoding: [0xc5,0x9d,0x59,0xf4]
+// OPT:   vmulpd  %ymm4, %ymm12, %ymm6
+// OPT:   encoding: [0xc5,0x9d,0x59,0xf4]
+// NOOPT: vmulpd  %ymm12, %ymm4, %ymm6
+// NOOPT: encoding: [0xc4,0xc1,0x5d,0x59,0xf4]
           vmulpd  %ymm12, %ymm4, %ymm6
 
 // CHECK: vmaxps  (%rax), %ymm4, %ymm6
@@ -4203,7 +4212,8 @@ _foo2:
           {vex3} vmovq %xmm0, %xmm8
 
 // CHECK: vmovq %xmm8, %xmm0
-// CHECK: encoding: [0xc5,0x79,0xd6,0xc0]
+// OPT:   encoding: [0xc5,0x79,0xd6,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7a,0x7e,0xc0]
           vmovq %xmm8, %xmm0
 
 // CHECK: vmovq %xmm8, %xmm0
@@ -4219,7 +4229,8 @@ _foo2:
           {vex3} vmovdqa %xmm0, %xmm8
 
 // CHECK: vmovdqa %xmm8, %xmm0
-// CHECK: encoding: [0xc5,0x79,0x7f,0xc0]
+// OPT:   encoding: [0xc5,0x79,0x7f,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x79,0x6f,0xc0]
           vmovdqa %xmm8, %xmm0
 
 // CHECK: vmovdqa %xmm8, %xmm0
@@ -4235,7 +4246,8 @@ _foo2:
           {vex3} vmovdqu %xmm0, %xmm8
 
 // CHECK: vmovdqu %xmm8, %xmm0
-// CHECK: encoding: [0xc5,0x7a,0x7f,0xc0]
+// OPT:   encoding: [0xc5,0x7a,0x7f,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7a,0x6f,0xc0]
           vmovdqu %xmm8, %xmm0
 
 // CHECK: vmovdqu %xmm8, %xmm0
@@ -4251,7 +4263,8 @@ _foo2:
           {vex3} vmovaps %xmm0, %xmm8
 
 // CHECK: vmovaps %xmm8, %xmm0
-// CHECK: encoding: [0xc5,0x78,0x29,0xc0]
+// OPT:   encoding: [0xc5,0x78,0x29,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x78,0x28,0xc0]
           vmovaps %xmm8, %xmm0
 
 // CHECK: vmovaps %xmm8, %xmm0
@@ -4267,7 +4280,8 @@ _foo2:
           {vex3} vmovaps %ymm0, %ymm8
 
 // CHECK: vmovaps %ymm8, %ymm0
-// CHECK: encoding: [0xc5,0x7c,0x29,0xc0]
+// OPT:   encoding: [0xc5,0x7c,0x29,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7c,0x28,0xc0]
           vmovaps %ymm8, %ymm0
 
 // CHECK: vmovaps %ymm8, %ymm0
@@ -4283,7 +4297,8 @@ _foo2:
           {vex3} vmovups %xmm0, %xmm8
 
 // CHECK: vmovups %xmm8, %xmm0
-// CHECK: encoding: [0xc5,0x78,0x11,0xc0]
+// OPT:   encoding: [0xc5,0x78,0x11,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x78,0x10,0xc0]
           vmovups %xmm8, %xmm0
 
 // CHECK: vmovups %xmm8, %xmm0
@@ -4299,7 +4314,8 @@ _foo2:
           {vex3} vmovups %ymm0, %ymm8
 
 // CHECK: vmovups %ymm8, %ymm0
-// CHECK: encoding: [0xc5,0x7c,0x11,0xc0]
+// OPT:   encoding: [0xc5,0x7c,0x11,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7c,0x10,0xc0]
           vmovups %ymm8, %ymm0
 
 // CHECK: vmovups %ymm8, %ymm0
@@ -4323,7 +4339,8 @@ _foo2:
           {vex3} vmovss %xmm0, %xmm8, %xmm0
 
 // CHECK: vmovss %xmm8, %xmm0, %xmm0
-// CHECK: encoding: [0xc5,0x7a,0x11,0xc0]
+// OPT:   encoding: [0xc5,0x7a,0x11,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7a,0x10,0xc0]
           vmovss %xmm8, %xmm0, %xmm0
 
 // CHECK: vmovss %xmm8, %xmm0, %xmm0
@@ -4347,7 +4364,8 @@ _foo2:
           {vex3} vmovsd %xmm0, %xmm8, %xmm0
 
 // CHECK: vmovsd %xmm8, %xmm0, %xmm0
-// CHECK: encoding: [0xc5,0x7b,0x11,0xc0]
+// OPT:   encoding: [0xc5,0x7b,0x11,0xc0]
+// NOOPT: encoding: [0xc4,0xc1,0x7b,0x10,0xc0]
           vmovsd %xmm8, %xmm0, %xmm0
 
 // CHECK: vmovsd %xmm8, %xmm0, %xmm0


