[llvm] f8395f8 - [X86] Cleanup check prefixes identified in #92248

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Wed May 15 06:25:41 PDT 2024


Author: Simon Pilgrim
Date: 2024-05-15T14:25:29+01:00
New Revision: f8395f8420cee8fc0854f43c9e88819c0ed54696

URL: https://github.com/llvm/llvm-project/commit/f8395f8420cee8fc0854f43c9e88819c0ed54696
DIFF: https://github.com/llvm/llvm-project/commit/f8395f8420cee8fc0854f43c9e88819c0ed54696.diff

LOG: [X86] Cleanup check prefixes identified in #92248

Avoid using leading digits in check prefixes - replace them with prefixes named for the actual triple configuration.
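For illustration (a sketch distilled from the diff below, not additional commit text): a bare bit-width prefix such as "32" reads like a number rather than a configuration, while a triple-derived prefix such as "X86" names the target directly:

    ; Old: prefix is a bare number
    ; RUN: llc -mtriple=i686 < %s | FileCheck %s --check-prefixes=CHECK,32
    ; New: prefix names the triple configuration
    ; RUN: llc -mtriple=i686 < %s | FileCheck %s --check-prefixes=CHECK,X86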

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/align-branch-boundary-suppressions-tls.ll
    llvm/test/CodeGen/X86/asm-modifier.ll
    llvm/test/CodeGen/X86/pr32345.ll
    llvm/test/CodeGen/X86/x32-va_start.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/align-branch-boundary-suppressions-tls.ll b/llvm/test/CodeGen/X86/align-branch-boundary-suppressions-tls.ll
index fd58cfeb65fd7..0e2e6f3ef81df 100644
--- a/llvm/test/CodeGen/X86/align-branch-boundary-suppressions-tls.ll
+++ b/llvm/test/CodeGen/X86/align-branch-boundary-suppressions-tls.ll
@@ -2,8 +2,8 @@
 ;; sequence. It uses prefixes to allow linker relaxation. We need to disable
 ;; prefix or nop padding for it. For simplicity and consistency, disable for
 ;; Local Dynamic and 32-bit as well.
-; RUN: llc -mtriple=i386 -relocation-model=pic -x86-branches-within-32B-boundaries < %s | FileCheck --check-prefixes=CHECK,32 %s
-; RUN: llc -mtriple=x86_64 -relocation-model=pic -x86-branches-within-32B-boundaries < %s | FileCheck --check-prefixes=CHECK,64 %s
+; RUN: llc -mtriple=i386 -relocation-model=pic -x86-branches-within-32B-boundaries < %s | FileCheck --check-prefixes=CHECK,X86 %s
+; RUN: llc -mtriple=x86_64 -relocation-model=pic -x86-branches-within-32B-boundaries < %s | FileCheck --check-prefixes=CHECK,X64 %s
 
 @gd = external thread_local global i32
 @ld = internal thread_local global i32 0
@@ -11,17 +11,17 @@
 define i32 @tls_get_addr() {
 ; CHECK-LABEL: tls_get_addr:
 ; CHECK: #noautopadding
-; 32: leal gd@TLSGD(,%ebx), %eax
-; 32: calll ___tls_get_addr@PLT
-; 64: data16
-; 64: leaq gd@TLSGD(%rip), %rdi
-; 64: callq __tls_get_addr@PLT
+; X86: leal gd@TLSGD(,%ebx), %eax
+; X86: calll ___tls_get_addr@PLT
+; X64: data16
+; X64: leaq gd@TLSGD(%rip), %rdi
+; X64: callq __tls_get_addr@PLT
 ; CHECK: #autopadding
 ; CHECK: #noautopadding
-; 32: leal ld@TLSLDM(%ebx), %eax
-; 32: calll ___tls_get_addr@PLT
-; 64: leaq ld@TLSLD(%rip), %rdi
-; 64: callq __tls_get_addr@PLT
+; X86: leal ld@TLSLDM(%ebx), %eax
+; X86: calll ___tls_get_addr@PLT
+; X64: leaq ld@TLSLD(%rip), %rdi
+; X64: callq __tls_get_addr@PLT
 ; CHECK: #autopadding
   %1 = load i32, ptr @gd
   %2 = load i32, ptr @ld

diff --git a/llvm/test/CodeGen/X86/asm-modifier.ll b/llvm/test/CodeGen/X86/asm-modifier.ll
index c121b46f84506..9a69402d22168 100644
--- a/llvm/test/CodeGen/X86/asm-modifier.ll
+++ b/llvm/test/CodeGen/X86/asm-modifier.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=i686 < %s | FileCheck %s --check-prefixes=CHECK,32
-; RUN: llc -mtriple=x86_64 < %s | FileCheck %s --check-prefixes=CHECK,64
+; RUN: llc -mtriple=i686 < %s | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc -mtriple=x86_64 < %s | FileCheck %s --check-prefixes=CHECK,X64
 
 @var = internal global i32 0, align 4
 
@@ -43,20 +43,20 @@ entry:
 }
 
 define void @test_V(ptr %p) {
-; 32-LABEL: test_V:
-; 32:       # %bb.0: # %entry
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    #APP
-; 32-NEXT:    calll __x86_indirect_thunk_eax
-; 32-NEXT:    #NO_APP
-; 32-NEXT:    retl
+; X86-LABEL: test_V:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    #APP
+; X86-NEXT:    calll __x86_indirect_thunk_eax
+; X86-NEXT:    #NO_APP
+; X86-NEXT:    retl
 ;
-; 64-LABEL: test_V:
-; 64:       # %bb.0: # %entry
-; 64-NEXT:    #APP
-; 64-NEXT:    callq __x86_indirect_thunk_rdi
-; 64-NEXT:    #NO_APP
-; 64-NEXT:    retq
+; X64-LABEL: test_V:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    #APP
+; X64-NEXT:    callq __x86_indirect_thunk_rdi
+; X64-NEXT:    #NO_APP
+; X64-NEXT:    retq
 entry:
   tail call void asm sideeffect "call __x86_indirect_thunk_${0:V}", "r,~{dirflag},~{fpsr},~{flags}"(ptr %p)
   ret void

diff --git a/llvm/test/CodeGen/X86/pr32345.ll b/llvm/test/CodeGen/X86/pr32345.ll
index 2745cb8bb908b..c7405e982660c 100644
--- a/llvm/test/CodeGen/X86/pr32345.ll
+++ b/llvm/test/CodeGen/X86/pr32345.ll
@@ -1,74 +1,74 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X640
-; RUN: llc -O0 -mtriple=i686-unknown             -o - %s | FileCheck %s -check-prefix=6860
-; RUN: llc     -mtriple=x86_64-unknown-linux-gnu -o - %s | FileCheck %s -check-prefix=X64
-; RUN: llc     -mtriple=i686-unknown             -o - %s | FileCheck %s -check-prefix=686
+; RUN: llc -O0 -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=X64-O0
+; RUN: llc -O0 -mtriple=i686-unknown             < %s | FileCheck %s -check-prefix=X86-O0
+; RUN: llc     -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=X64
+; RUN: llc     -mtriple=i686-unknown             < %s | FileCheck %s -check-prefix=X86
 
 @var_22 = external dso_local global i16, align 2
 @var_27 = external dso_local global i16, align 2
 
 define void @foo() {
-; X640-LABEL: foo:
-; X640:       # %bb.0: # %bb
-; X640-NEXT:    movzwl var_22, %eax
-; X640-NEXT:    movzwl var_27, %ecx
-; X640-NEXT:    xorl %ecx, %eax
-; X640-NEXT:    movzwl var_27, %ecx
-; X640-NEXT:    xorl %ecx, %eax
-; X640-NEXT:    cltq
-; X640-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
-; X640-NEXT:    movzwl var_22, %eax
-; X640-NEXT:    movzwl var_27, %ecx
-; X640-NEXT:    xorl %ecx, %eax
-; X640-NEXT:    movzwl var_27, %ecx
-; X640-NEXT:    xorl %ecx, %eax
-; X640-NEXT:    cltq
-; X640-NEXT:    movzwl var_27, %ecx
-; X640-NEXT:    subl $16610, %ecx # imm = 0x40E2
-; X640-NEXT:    movl %ecx, %ecx
-; X640-NEXT:    # kill: def $rcx killed $ecx
-; X640-NEXT:    # kill: def $cl killed $rcx
-; X640-NEXT:    sarq %cl, %rax
-; X640-NEXT:    movb %al, %cl
-; X640-NEXT:    # implicit-def: $rax
-; X640-NEXT:    movb %cl, (%rax)
-; X640-NEXT:    retq
+; X64-O0-LABEL: foo:
+; X64-O0:       # %bb.0: # %bb
+; X64-O0-NEXT:    movzwl var_22, %eax
+; X64-O0-NEXT:    movzwl var_27, %ecx
+; X64-O0-NEXT:    xorl %ecx, %eax
+; X64-O0-NEXT:    movzwl var_27, %ecx
+; X64-O0-NEXT:    xorl %ecx, %eax
+; X64-O0-NEXT:    cltq
+; X64-O0-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; X64-O0-NEXT:    movzwl var_22, %eax
+; X64-O0-NEXT:    movzwl var_27, %ecx
+; X64-O0-NEXT:    xorl %ecx, %eax
+; X64-O0-NEXT:    movzwl var_27, %ecx
+; X64-O0-NEXT:    xorl %ecx, %eax
+; X64-O0-NEXT:    cltq
+; X64-O0-NEXT:    movzwl var_27, %ecx
+; X64-O0-NEXT:    subl $16610, %ecx # imm = 0x40E2
+; X64-O0-NEXT:    movl %ecx, %ecx
+; X64-O0-NEXT:    # kill: def $rcx killed $ecx
+; X64-O0-NEXT:    # kill: def $cl killed $rcx
+; X64-O0-NEXT:    sarq %cl, %rax
+; X64-O0-NEXT:    movb %al, %cl
+; X64-O0-NEXT:    # implicit-def: $rax
+; X64-O0-NEXT:    movb %cl, (%rax)
+; X64-O0-NEXT:    retq
 ;
-; 6860-LABEL: foo:
-; 6860:       # %bb.0: # %bb
-; 6860-NEXT:    pushl %ebp
-; 6860-NEXT:    .cfi_def_cfa_offset 8
-; 6860-NEXT:    .cfi_offset %ebp, -8
-; 6860-NEXT:    movl %esp, %ebp
-; 6860-NEXT:    .cfi_def_cfa_register %ebp
-; 6860-NEXT:    andl $-8, %esp
-; 6860-NEXT:    subl $24, %esp
-; 6860-NEXT:    movzwl var_22, %eax
-; 6860-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; 6860-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; 6860-NEXT:    movzwl var_22, %edx
-; 6860-NEXT:    movb var_27, %cl
-; 6860-NEXT:    addb $30, %cl
-; 6860-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; 6860-NEXT:    xorl %eax, %eax
-; 6860-NEXT:    shrdl %cl, %eax, %edx
-; 6860-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
-; 6860-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; 6860-NEXT:    testb $32, %cl
-; 6860-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; 6860-NEXT:    jne .LBB0_2
-; 6860-NEXT:  # %bb.1: # %bb
-; 6860-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; 6860-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; 6860-NEXT:  .LBB0_2: # %bb
-; 6860-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; 6860-NEXT:    movb %al, %cl
-; 6860-NEXT:    # implicit-def: $eax
-; 6860-NEXT:    movb %cl, (%eax)
-; 6860-NEXT:    movl %ebp, %esp
-; 6860-NEXT:    popl %ebp
-; 6860-NEXT:    .cfi_def_cfa %esp, 4
-; 6860-NEXT:    retl
+; X86-O0-LABEL: foo:
+; X86-O0:       # %bb.0: # %bb
+; X86-O0-NEXT:    pushl %ebp
+; X86-O0-NEXT:    .cfi_def_cfa_offset 8
+; X86-O0-NEXT:    .cfi_offset %ebp, -8
+; X86-O0-NEXT:    movl %esp, %ebp
+; X86-O0-NEXT:    .cfi_def_cfa_register %ebp
+; X86-O0-NEXT:    andl $-8, %esp
+; X86-O0-NEXT:    subl $24, %esp
+; X86-O0-NEXT:    movzwl var_22, %eax
+; X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-O0-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-O0-NEXT:    movzwl var_22, %edx
+; X86-O0-NEXT:    movb var_27, %cl
+; X86-O0-NEXT:    addb $30, %cl
+; X86-O0-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-O0-NEXT:    xorl %eax, %eax
+; X86-O0-NEXT:    shrdl %cl, %eax, %edx
+; X86-O0-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
+; X86-O0-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-O0-NEXT:    testb $32, %cl
+; X86-O0-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-O0-NEXT:    jne .LBB0_2
+; X86-O0-NEXT:  # %bb.1: # %bb
+; X86-O0-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-O0-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-O0-NEXT:  .LBB0_2: # %bb
+; X86-O0-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-O0-NEXT:    movb %al, %cl
+; X86-O0-NEXT:    # implicit-def: $eax
+; X86-O0-NEXT:    movb %cl, (%eax)
+; X86-O0-NEXT:    movl %ebp, %esp
+; X86-O0-NEXT:    popl %ebp
+; X86-O0-NEXT:    .cfi_def_cfa %esp, 4
+; X86-O0-NEXT:    retl
 ;
 ; X64-LABEL: foo:
 ; X64:       # %bb.0: # %bb
@@ -80,32 +80,32 @@ define void @foo() {
 ; X64-NEXT:    movb %al, (%rax)
 ; X64-NEXT:    retq
 ;
-; 686-LABEL: foo:
-; 686:       # %bb.0: # %bb
-; 686-NEXT:    pushl %ebp
-; 686-NEXT:    .cfi_def_cfa_offset 8
-; 686-NEXT:    .cfi_offset %ebp, -8
-; 686-NEXT:    movl %esp, %ebp
-; 686-NEXT:    .cfi_def_cfa_register %ebp
-; 686-NEXT:    andl $-8, %esp
-; 686-NEXT:    subl $8, %esp
-; 686-NEXT:    movzbl var_27, %ecx
-; 686-NEXT:    movzwl var_22, %eax
-; 686-NEXT:    movl %eax, (%esp)
-; 686-NEXT:    movl $0, {{[0-9]+}}(%esp)
-; 686-NEXT:    addb $30, %cl
-; 686-NEXT:    xorl %edx, %edx
-; 686-NEXT:    shrdl %cl, %edx, %eax
-; 686-NEXT:    testb $32, %cl
-; 686-NEXT:    jne .LBB0_2
-; 686-NEXT:  # %bb.1: # %bb
-; 686-NEXT:    movl %eax, %edx
-; 686-NEXT:  .LBB0_2: # %bb
-; 686-NEXT:    movb %dl, (%eax)
-; 686-NEXT:    movl %ebp, %esp
-; 686-NEXT:    popl %ebp
-; 686-NEXT:    .cfi_def_cfa %esp, 4
-; 686-NEXT:    retl
+; X86-LABEL: foo:
+; X86:       # %bb.0: # %bb
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    .cfi_def_cfa_register %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    movzbl var_27, %ecx
+; X86-NEXT:    movzwl var_22, %eax
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT:    addb $30, %cl
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    shrdl %cl, %edx, %eax
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    jne .LBB0_2
+; X86-NEXT:  # %bb.1: # %bb
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:  .LBB0_2: # %bb
+; X86-NEXT:    movb %dl, (%eax)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa %esp, 4
+; X86-NEXT:    retl
 bb:
   %tmp = alloca i64, align 8
   %tmp1 = load i16, ptr @var_22, align 2

diff --git a/llvm/test/CodeGen/X86/x32-va_start.ll b/llvm/test/CodeGen/X86/x32-va_start.ll
index e61e5765f124a..31c8aee3fddec 100644
--- a/llvm/test/CodeGen/X86/x32-va_start.ll
+++ b/llvm/test/CodeGen/X86/x32-va_start.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=NOSSE
-; RUN: llc < %s -mtriple=i386-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
-; RUN: llc < %s -mtriple=i686-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
+; RUN: llc < %s -mtriple=i386-linux-gnux32 | FileCheck %s -check-prefix=X32BITABI
+; RUN: llc < %s -mtriple=i686-linux-gnux32 | FileCheck %s -check-prefix=X32BITABI
 ;
 ; Verifies that x32 va_start lowering is sane. To regenerate this test, use
 ; cat <<EOF |
@@ -97,27 +97,27 @@ define i32 @foo(float %a, ptr nocapture readnone %fmt, ...) nounwind {
 ; NOSSE-NEXT:    movl (%eax), %eax
 ; NOSSE-NEXT:    retq
 ;
-; 32BITABI-LABEL: foo:
-; 32BITABI:       # %bb.0: # %entry
-; 32BITABI-NEXT:    subl $28, %esp
-; 32BITABI-NEXT:    leal {{[0-9]+}}(%esp), %ecx
-; 32BITABI-NEXT:    movl %ecx, (%esp)
-; 32BITABI-NEXT:    cmpl $40, %ecx
-; 32BITABI-NEXT:    ja .LBB0_2
-; 32BITABI-NEXT:  # %bb.1: # %vaarg.in_reg
-; 32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32BITABI-NEXT:    addl %ecx, %eax
-; 32BITABI-NEXT:    addl $8, %ecx
-; 32BITABI-NEXT:    movl %ecx, (%esp)
-; 32BITABI-NEXT:    jmp .LBB0_3
-; 32BITABI-NEXT:  .LBB0_2: # %vaarg.in_mem
-; 32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32BITABI-NEXT:    leal 8(%eax), %ecx
-; 32BITABI-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; 32BITABI-NEXT:  .LBB0_3: # %vaarg.end
-; 32BITABI-NEXT:    movl (%eax), %eax
-; 32BITABI-NEXT:    addl $28, %esp
-; 32BITABI-NEXT:    retl
+; X32BITABI-LABEL: foo:
+; X32BITABI:       # %bb.0: # %entry
+; X32BITABI-NEXT:    subl $28, %esp
+; X32BITABI-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X32BITABI-NEXT:    movl %ecx, (%esp)
+; X32BITABI-NEXT:    cmpl $40, %ecx
+; X32BITABI-NEXT:    ja .LBB0_2
+; X32BITABI-NEXT:  # %bb.1: # %vaarg.in_reg
+; X32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32BITABI-NEXT:    addl %ecx, %eax
+; X32BITABI-NEXT:    addl $8, %ecx
+; X32BITABI-NEXT:    movl %ecx, (%esp)
+; X32BITABI-NEXT:    jmp .LBB0_3
+; X32BITABI-NEXT:  .LBB0_2: # %vaarg.in_mem
+; X32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32BITABI-NEXT:    leal 8(%eax), %ecx
+; X32BITABI-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X32BITABI-NEXT:  .LBB0_3: # %vaarg.end
+; X32BITABI-NEXT:    movl (%eax), %eax
+; X32BITABI-NEXT:    addl $28, %esp
+; X32BITABI-NEXT:    retl
 entry:
   %ap = alloca [1 x %struct.__va_list_tag], align 16
   call void @llvm.lifetime.start.p0(i64 16, ptr %ap) #2


        

