[clang] 15f15ab - [x86][MC] Fix movdir64b addressing

Akshay Khadse via cfe-commits cfe-commits at lists.llvm.org
Thu Jun 8 07:41:51 PDT 2023


Author: Akshay Khadse
Date: 2023-06-08T22:41:00+08:00
New Revision: 15f15ab2c895545da70c14a72289fa7c00ed3f94

URL: https://github.com/llvm/llvm-project/commit/15f15ab2c895545da70c14a72289fa7c00ed3f94
DIFF: https://github.com/llvm/llvm-project/commit/15f15ab2c895545da70c14a72289fa7c00ed3f94.diff

LOG: [x86][MC] Fix movdir64b addressing

This patch fixes [issue 63045](https://github.com/llvm/llvm-project/issues/63045).

Look at the following code:
```
int main(int argc, char *argv[]) {
    int arr[1000];
    __asm movdir64b rax, ZMMWORD PTR [arr]
    return 0;
}
```
Compiling this code with `clang -O0 -fasm-blocks bug.c` gives a linker error.

The problem appears to be in the generated assembly. The following is the output of `clang -S -O0 -fasm-blocks bug.c`:
```
movq %rsi, -16(%rbp)
#APP

movdir64b arr, %rax

#NO_APP
xorl %eax, %eax
```
The symbol `arr` should have been replaced with a frame-relative address such as `-4(%rbp)`; since `arr` is a local variable, no such symbol exists for the linker to resolve, which is what produces the error.

This suggests the issue is not in the linker, but in the MS inline assembly parser.

This issue originates with patch [D145893](https://reviews.llvm.org/D145893), which is why reverting it fixes the problem. More specifically, the regression comes from the interaction between the MS inline assembly memory-operand handling and [isMem512_GR64()](https://github.com/llvm/llvm-project/blob/ff471dcf7669b1ad7903a44d0773bef4eb175eb9/llvm/lib/Target/X86/AsmParser/X86Operand.h#L404) in [llvm/lib/Target/X86/AsmParser/X86Operand.h](https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/X86/AsmParser/X86Operand.h).
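
The removed line in `CreateMemForMSInlineAsm()` substituted a placeholder base register (`BaseReg = 1`) when the real base register was not yet known. That placeholder is not an actual 64-bit GPR, so an `isMem512_GR64()`-style predicate appears to reject the operand and the `zmmword ptr` memory form fails to match, leaving `arr` as a bare symbol. Below is a minimal standalone sketch of that kind of check; the names, struct, and register numbers are made up for illustration and are not the real LLVM API:
```
// Minimal standalone model (hypothetical names and register numbers):
// a placeholder base register that is not a real 64-bit GPR makes an
// isMem512_GR64()-style predicate reject the memory operand.
#include <iostream>
#include <set>

// Pretend register numbers for a few 64-bit GPRs. Note that 1 (the old
// placeholder from "BaseReg = BaseReg ? BaseReg : 1;") is not among them.
static const std::set<unsigned> GR64Regs = {50 /*RAX*/, 51 /*RBX*/,
                                            52 /*RSP*/, 53 /*RBP*/};

struct MemOperand {
  unsigned BaseReg;     // 0 means "no base register chosen yet"
  unsigned SizeInBits;  // operand width, e.g. 512 for zmmword ptr
};

// Sketch of the predicate: a 512-bit memory reference whose base register,
// if present, must be a 64-bit GPR.
static bool isMem512_GR64(const MemOperand &Op) {
  if (Op.SizeInBits != 512)
    return false;
  if (Op.BaseReg && !GR64Regs.count(Op.BaseReg))
    return false;
  return true;
}

int main() {
  MemOperand NoBase{/*BaseReg=*/0, /*SizeInBits=*/512};
  MemOperand Placeholder{/*BaseReg=*/1, /*SizeInBits=*/512};

  // With no base register the operand is accepted; the base can be filled
  // in later when the local-variable reference is rewritten.
  std::cout << "BaseReg = 0: " << isMem512_GR64(NoBase) << "\n";      // 1
  // With the old placeholder BaseReg = 1 the predicate rejects the operand,
  // which is how the zmmword form stopped matching for `arr`.
  std::cout << "BaseReg = 1: " << isMem512_GR64(Placeholder) << "\n"; // 0
  return 0;
}
```
With the placeholder removed, the operand keeps `BaseReg = 0` at this stage, matching succeeds, and the usual MS inline asm rewrite replaces `arr` with a frame-relative address, which is what the new tests below check.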

Reviewed By: skan

Differential Revision: https://reviews.llvm.org/D151863

Added: 
    llvm/test/CodeGen/X86/inline-asm-movdir64b-x86_64.ll
    llvm/test/CodeGen/X86/inline-asm-movdir64b.ll

Modified: 
    clang/test/CodeGen/ms-inline-asm-64.c
    clang/test/CodeGen/ms-inline-asm.c
    llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/ms-inline-asm-64.c b/clang/test/CodeGen/ms-inline-asm-64.c
index d688c10ca7080..313d380e121bc 100644
--- a/clang/test/CodeGen/ms-inline-asm-64.c
+++ b/clang/test/CodeGen/ms-inline-asm-64.c
@@ -72,3 +72,10 @@ void t5(void) {
   // CHECK-SAME: jmp ${1:P}
   // CHECK-SAME: "*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(void (...)) @bar, ptr elementtype(void (...)) @bar)
 }
+
+void t47(void) {
+  // CHECK-LABEL: define{{.*}} void @t47
+  int arr[1000];
+  __asm movdir64b rax, zmmword ptr [arr]
+  // CHECK: call void asm sideeffect inteldialect "movdir64b rax, zmmword ptr $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype([1000 x i32]) %arr)
+}

diff  --git a/clang/test/CodeGen/ms-inline-asm.c b/clang/test/CodeGen/ms-inline-asm.c
index 9498be1034768..c3eef9a23e166 100644
--- a/clang/test/CodeGen/ms-inline-asm.c
+++ b/clang/test/CodeGen/ms-inline-asm.c
@@ -675,6 +675,13 @@ void t46(void) {
   // CHECK: call void asm sideeffect inteldialect "add eax, [eax + $$-128]", "~{eax},~{flags},~{dirflag},~{fpsr},~{flags}"()
 }
 
+void t47(void) {
+  // CHECK-LABEL: define{{.*}} void @t47
+  int arr[1000];
+  __asm movdir64b eax, zmmword ptr [arr]
+  // CHECK: call void asm sideeffect inteldialect "movdir64b eax, zmmword ptr $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype([1000 x i32]) %arr)
+}
+
 void dot_operator(void){
   // CHECK-LABEL: define{{.*}} void @dot_operator
 	__asm { mov eax, 3[ebx]A.b}

diff  --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 8c6ae1d1611aa..11cfe3cba751d 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -1776,10 +1776,6 @@ bool X86AsmParser::CreateMemForMSInlineAsm(
                                              BaseReg && IndexReg));
     return false;
   }
-  // Otherwise, we set the base register to a non-zero value
-  // if we don't know the actual value at this time.  This is necessary to
-  // get the matching correct in some cases.
-  BaseReg = BaseReg ? BaseReg : 1;
   Operands.push_back(X86Operand::CreateMem(
       getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
       Size,

diff  --git a/llvm/test/CodeGen/X86/inline-asm-movdir64b-x86_64.ll b/llvm/test/CodeGen/X86/inline-asm-movdir64b-x86_64.ll
new file mode 100644
index 0000000000000..c8724a3960d49
--- /dev/null
+++ b/llvm/test/CodeGen/X86/inline-asm-movdir64b-x86_64.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+movdir64b | FileCheck %s --check-prefix=X64
+
+define void @test_movdir64b() {
+; X64-LABEL: test_movdir64b:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    subq $3880, %rsp # imm = 0xF28
+; X64-NEXT:    .cfi_def_cfa_offset 3888
+; X64-NEXT:    #APP
+; X64-EMPTY:
+; X64-NEXT:    movdir64b -{{[0-9]+}}(%rsp), %rax
+; X64-EMPTY:
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    addq $3880, %rsp # imm = 0xF28
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    retq
+entry:
+  %arr = alloca [1000 x i32], align 16
+  call void asm sideeffect inteldialect "movdir64b rax, zmmword ptr $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype([1000 x i32]) %arr)
+  ret void
+}

diff  --git a/llvm/test/CodeGen/X86/inline-asm-movdir64b.ll b/llvm/test/CodeGen/X86/inline-asm-movdir64b.ll
new file mode 100644
index 0000000000000..0fe7189de34c6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/inline-asm-movdir64b.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+movdir64b | FileCheck %s --check-prefix=X86
+
+define void @test_movdir64b() {
+; X86-LABEL: test_movdir64b:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $4000, %esp # imm = 0xFA0
+; X86-NEXT:    .cfi_def_cfa_offset 4004
+; X86-NEXT:    #APP
+; X86-EMPTY:
+; X86-NEXT:    movdir64b (%esp), %eax
+; X86-EMPTY:
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    addl $4000, %esp # imm = 0xFA0
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+entry:
+  %arr = alloca [1000 x i32], align 4
+  call void asm sideeffect inteldialect "movdir64b eax, zmmword ptr $0", "*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype([1000 x i32]) %arr)
+  ret void
+}
