[llvm] 0fbb320 - [X86] csr-split.ll - regenerate with standard X86/X64 prefixes

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Fri Jul 5 02:38:00 PDT 2024


Author: Simon Pilgrim
Date: 2024-07-05T10:37:34+01:00
New Revision: 0fbb3201c8c4dfdf5c9c2f5cf15449d1c4355f8d

URL: https://github.com/llvm/llvm-project/commit/0fbb3201c8c4dfdf5c9c2f5cf15449d1c4355f8d
DIFF: https://github.com/llvm/llvm-project/commit/0fbb3201c8c4dfdf5c9c2f5cf15449d1c4355f8d.diff

LOG: [X86] csr-split.ll - regenerate with standard X86/X64 prefixes

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/csr-split.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/csr-split.ll b/llvm/test/CodeGen/X86/csr-split.ll
index f90d14347bc5b..20d577efd2714 100644
--- a/llvm/test/CodeGen/X86/csr-split.ll
+++ b/llvm/test/CodeGen/X86/csr-split.ll
@@ -1,47 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-linux < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mtriple=i386-unknown-linux < %s | FileCheck %s --check-prefix=CHECK-32BIT
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-linux < %s | FileCheck %s --check-prefixes=X64
+; RUN: llc -verify-machineinstrs -mtriple=i386-unknown-linux < %s | FileCheck %s --check-prefixes=X86
 
 ; Check CSR split can work properly for tests below.
 
 @a = common dso_local local_unnamed_addr global i32 0, align 4
 
 define dso_local signext i32 @test1(ptr %b) local_unnamed_addr  {
-; CHECK-LABEL: test1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movslq a(%rip), %rax
-; CHECK-NEXT:    cmpq %rdi, %rax
-; CHECK-NEXT:    je .LBB0_2
-; CHECK-NEXT:  # %bb.1: # %if.end
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  .LBB0_2: # %if.then
-; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset %rbx, -16
-; CHECK-NEXT:    movq %rdi, %rbx
-; CHECK-NEXT:    callq callVoid@PLT
-; CHECK-NEXT:    movq %rbx, %rdi
-; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    jmp callNonVoid@PLT # TAILCALL
+; X64-LABEL: test1:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movslq a(%rip), %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    je .LBB0_2
+; X64-NEXT:  # %bb.1: # %if.end
+; X64-NEXT:    retq
+; X64-NEXT:  .LBB0_2: # %if.then
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    .cfi_offset %rbx, -16
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq callVoid@PLT
+; X64-NEXT:    movq %rbx, %rdi
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    jmp callNonVoid@PLT # TAILCALL
 ;
-; CHECK-32BIT-LABEL: test1:
-; CHECK-32BIT:       # %bb.0: # %entry
-; CHECK-32BIT-NEXT:    subl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-32BIT-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-32BIT-NEXT:    cmpl %eax, a
-; CHECK-32BIT-NEXT:    je .LBB0_2
-; CHECK-32BIT-NEXT:  # %bb.1: # %if.end
-; CHECK-32BIT-NEXT:    addl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-32BIT-NEXT:    retl
-; CHECK-32BIT-NEXT:  .LBB0_2: # %if.then
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-32BIT-NEXT:    calll callVoid@PLT
-; CHECK-32BIT-NEXT:    addl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-32BIT-NEXT:    jmp callNonVoid@PLT # TAILCALL
+; X86-LABEL: test1:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl %eax, a
+; X86-NEXT:    je .LBB0_2
+; X86-NEXT:  # %bb.1: # %if.end
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB0_2: # %if.then
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    calll callVoid@PLT
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    jmp callNonVoid@PLT # TAILCALL
 entry:
   %0 = load i32, ptr @a, align 4, !tbaa !2
   %conv = sext i32 %0 to i64
@@ -64,49 +64,49 @@ declare signext i32 @callVoid(...) local_unnamed_addr
 declare signext i32 @callNonVoid(ptr) local_unnamed_addr
 
 define dso_local signext i32 @test2(ptr %p1) local_unnamed_addr  {
-; CHECK-LABEL: test2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    testq %rdi, %rdi
-; CHECK-NEXT:    je .LBB1_2
-; CHECK-NEXT:  # %bb.1: # %if.end
-; CHECK-NEXT:    movslq a(%rip), %rax
-; CHECK-NEXT:    cmpq %rdi, %rax
-; CHECK-NEXT:    je .LBB1_3
-; CHECK-NEXT:  .LBB1_2: # %return
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  .LBB1_3: # %if.then2
-; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset %rbx, -16
-; CHECK-NEXT:    movq %rdi, %rbx
-; CHECK-NEXT:    callq callVoid@PLT
-; CHECK-NEXT:    movq %rbx, %rdi
-; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    jmp callNonVoid@PLT # TAILCALL
+; X64-LABEL: test2:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    testq %rdi, %rdi
+; X64-NEXT:    je .LBB1_2
+; X64-NEXT:  # %bb.1: # %if.end
+; X64-NEXT:    movslq a(%rip), %rax
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    je .LBB1_3
+; X64-NEXT:  .LBB1_2: # %return
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+; X64-NEXT:  .LBB1_3: # %if.then2
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    .cfi_offset %rbx, -16
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq callVoid@PLT
+; X64-NEXT:    movq %rbx, %rdi
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    jmp callNonVoid@PLT # TAILCALL
 ;
-; CHECK-32BIT-LABEL: test2:
-; CHECK-32BIT:       # %bb.0: # %entry
-; CHECK-32BIT-NEXT:    subl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-32BIT-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-32BIT-NEXT:    testl %eax, %eax
-; CHECK-32BIT-NEXT:    je .LBB1_2
-; CHECK-32BIT-NEXT:  # %bb.1: # %if.end
-; CHECK-32BIT-NEXT:    cmpl %eax, a
-; CHECK-32BIT-NEXT:    je .LBB1_3
-; CHECK-32BIT-NEXT:  .LBB1_2: # %return
-; CHECK-32BIT-NEXT:    xorl %eax, %eax
-; CHECK-32BIT-NEXT:    addl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-32BIT-NEXT:    retl
-; CHECK-32BIT-NEXT:  .LBB1_3: # %if.then2
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-32BIT-NEXT:    calll callVoid@PLT
-; CHECK-32BIT-NEXT:    addl $12, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-32BIT-NEXT:    jmp callNonVoid@PLT # TAILCALL
+; X86-LABEL: test2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    je .LBB1_2
+; X86-NEXT:  # %bb.1: # %if.end
+; X86-NEXT:    cmpl %eax, a
+; X86-NEXT:    je .LBB1_3
+; X86-NEXT:  .LBB1_2: # %return
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB1_3: # %if.then2
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    calll callVoid@PLT
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    jmp callNonVoid@PLT # TAILCALL
 entry:
   %tobool = icmp eq ptr %p1, null
   br i1 %tobool, label %return, label %if.end
@@ -130,70 +130,70 @@ return:                                           ; preds = %if.end, %entry, %if
 
 
 define dso_local ptr @test3(ptr nocapture %p1, i8 zeroext %p2) local_unnamed_addr  {
-; CHECK-LABEL: test3:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    pushq %r14
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 24
-; CHECK-NEXT:    pushq %rax
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    .cfi_offset %rbx, -24
-; CHECK-NEXT:    .cfi_offset %r14, -16
-; CHECK-NEXT:    movq (%rdi), %rbx
-; CHECK-NEXT:    testq %rbx, %rbx
-; CHECK-NEXT:    je .LBB2_2
-; CHECK-NEXT:  # %bb.1: # %land.rhs
-; CHECK-NEXT:    movq %rdi, %r14
-; CHECK-NEXT:    movzbl %sil, %esi
-; CHECK-NEXT:    movq %rbx, %rdi
-; CHECK-NEXT:    callq bar@PLT
-; CHECK-NEXT:    movq %rax, (%r14)
-; CHECK-NEXT:  .LBB2_2: # %land.end
-; CHECK-NEXT:    movq %rbx, %rax
-; CHECK-NEXT:    addq $8, %rsp
-; CHECK-NEXT:    .cfi_def_cfa_offset 24
-; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    popq %r14
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    retq
+; X64-LABEL: test3:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    pushq %r14
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 24
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    .cfi_def_cfa_offset 32
+; X64-NEXT:    .cfi_offset %rbx, -24
+; X64-NEXT:    .cfi_offset %r14, -16
+; X64-NEXT:    movq (%rdi), %rbx
+; X64-NEXT:    testq %rbx, %rbx
+; X64-NEXT:    je .LBB2_2
+; X64-NEXT:  # %bb.1: # %land.rhs
+; X64-NEXT:    movq %rdi, %r14
+; X64-NEXT:    movzbl %sil, %esi
+; X64-NEXT:    movq %rbx, %rdi
+; X64-NEXT:    callq bar@PLT
+; X64-NEXT:    movq %rax, (%r14)
+; X64-NEXT:  .LBB2_2: # %land.end
+; X64-NEXT:    movq %rbx, %rax
+; X64-NEXT:    addq $8, %rsp
+; X64-NEXT:    .cfi_def_cfa_offset 24
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    popq %r14
+; X64-NEXT:    .cfi_def_cfa_offset 8
+; X64-NEXT:    retq
 ;
-; CHECK-32BIT-LABEL: test3:
-; CHECK-32BIT:       # %bb.0: # %entry
-; CHECK-32BIT-NEXT:    pushl %edi
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-32BIT-NEXT:    pushl %esi
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-32BIT-NEXT:    pushl %eax
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-32BIT-NEXT:    .cfi_offset %esi, -12
-; CHECK-32BIT-NEXT:    .cfi_offset %edi, -8
-; CHECK-32BIT-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK-32BIT-NEXT:    movl (%edi), %esi
-; CHECK-32BIT-NEXT:    testl %esi, %esi
-; CHECK-32BIT-NEXT:    je .LBB2_2
-; CHECK-32BIT-NEXT:  # %bb.1: # %land.rhs
-; CHECK-32BIT-NEXT:    subl $8, %esp
-; CHECK-32BIT-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK-32BIT-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; CHECK-32BIT-NEXT:    pushl %eax
-; CHECK-32BIT-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK-32BIT-NEXT:    pushl %esi
-; CHECK-32BIT-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK-32BIT-NEXT:    calll bar@PLT
-; CHECK-32BIT-NEXT:    addl $16, %esp
-; CHECK-32BIT-NEXT:    .cfi_adjust_cfa_offset -16
-; CHECK-32BIT-NEXT:    movl %eax, (%edi)
-; CHECK-32BIT-NEXT:  .LBB2_2: # %land.end
-; CHECK-32BIT-NEXT:    movl %esi, %eax
-; CHECK-32BIT-NEXT:    addl $4, %esp
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-32BIT-NEXT:    popl %esi
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-32BIT-NEXT:    popl %edi
-; CHECK-32BIT-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-32BIT-NEXT:    retl
+; X86-LABEL: test3:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    .cfi_offset %esi, -12
+; X86-NEXT:    .cfi_offset %edi, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl (%edi), %esi
+; X86-NEXT:    testl %esi, %esi
+; X86-NEXT:    je .LBB2_2
+; X86-NEXT:  # %bb.1: # %land.rhs
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    .cfi_adjust_cfa_offset 8
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NEXT:    calll bar@PLT
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    .cfi_adjust_cfa_offset -16
+; X86-NEXT:    movl %eax, (%edi)
+; X86-NEXT:  .LBB2_2: # %land.end
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
 entry:
   %0 = load ptr, ptr %p1, align 8, !tbaa !6
   %tobool = icmp eq ptr %0, null


        


More information about the llvm-commits mailing list