[llvm] c60bd0e - [X86] Regenerate select-mmx.ll
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 25 10:11:01 PDT 2023
Author: Simon Pilgrim
Date: 2023-10-25T18:10:51+01:00
New Revision: c60bd0e57827e6596449f4e6a42ee29054c78af6
URL: https://github.com/llvm/llvm-project/commit/c60bd0e57827e6596449f4e6a42ee29054c78af6
DIFF: https://github.com/llvm/llvm-project/commit/c60bd0e57827e6596449f4e6a42ee29054c78af6.diff
LOG: [X86] Regenerate select-mmx.ll
Change i686 check-prefix to the more standard X86 instead of I32
Added:

Modified:
    llvm/test/CodeGen/X86/select-mmx.ll

Removed:
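For context: a test like this is normally regenerated rather than edited by hand, using the utils/update_llc_test_checks.py script referenced in the test's NOTE line. A minimal sketch of the invocation, assuming an llvm-project checkout with llc built under build/bin (both paths are assumptions, not taken from this commit):

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/X86/select-mmx.ll

The script reruns each RUN line and rewrites the CHECK lines from llc's output, so renaming a --check-prefix (here I32 to X86) only requires changing the RUN line and regenerating; the assembly checks in the diff below are the script's output.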
################################################################################
diff --git a/llvm/test/CodeGen/X86/select-mmx.ll b/llvm/test/CodeGen/X86/select-mmx.ll
index f6bd4adb02b4c4e..27b7ebb8381cd3a 100644
--- a/llvm/test/CodeGen/X86/select-mmx.ll
+++ b/llvm/test/CodeGen/X86/select-mmx.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=I32
+; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X86
; From source: clang -02
@@ -27,33 +27,33 @@ define i64 @test47(i64 %arg) {
; X64-NEXT: movq %mm0, %rax
; X64-NEXT: retq
;
-; I32-LABEL: test47:
-; I32: # %bb.0:
-; I32-NEXT: pushl %ebp
-; I32-NEXT: .cfi_def_cfa_offset 8
-; I32-NEXT: .cfi_offset %ebp, -8
-; I32-NEXT: movl %esp, %ebp
-; I32-NEXT: .cfi_def_cfa_register %ebp
-; I32-NEXT: andl $-8, %esp
-; I32-NEXT: subl $8, %esp
-; I32-NEXT: movl 8(%ebp), %eax
-; I32-NEXT: orl 12(%ebp), %eax
-; I32-NEXT: je .LBB0_1
-; I32-NEXT: # %bb.2:
-; I32-NEXT: pxor %mm0, %mm0
-; I32-NEXT: jmp .LBB0_3
-; I32-NEXT: .LBB0_1:
-; I32-NEXT: movl $7, %eax
-; I32-NEXT: movd %eax, %mm0
-; I32-NEXT: .LBB0_3:
-; I32-NEXT: psllw %mm0, %mm0
-; I32-NEXT: movq %mm0, (%esp)
-; I32-NEXT: movl (%esp), %eax
-; I32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; I32-NEXT: movl %ebp, %esp
-; I32-NEXT: popl %ebp
-; I32-NEXT: .cfi_def_cfa %esp, 4
-; I32-NEXT: retl
+; X86-LABEL: test47:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: orl 12(%ebp), %eax
+; X86-NEXT: je .LBB0_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: jmp .LBB0_3
+; X86-NEXT: .LBB0_1:
+; X86-NEXT: movl $7, %eax
+; X86-NEXT: movd %eax, %mm0
+; X86-NEXT: .LBB0_3:
+; X86-NEXT: psllw %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl
%cond = icmp eq i64 %arg, 0
%slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx)
%psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
@@ -85,33 +85,33 @@ define i64 @test49(i64 %arg, i64 %x, i64 %y) {
; X64-NEXT: movq %mm0, %rax
; X64-NEXT: retq
;
-; I32-LABEL: test49:
-; I32: # %bb.0:
-; I32-NEXT: pushl %ebp
-; I32-NEXT: .cfi_def_cfa_offset 8
-; I32-NEXT: .cfi_offset %ebp, -8
-; I32-NEXT: movl %esp, %ebp
-; I32-NEXT: .cfi_def_cfa_register %ebp
-; I32-NEXT: andl $-8, %esp
-; I32-NEXT: subl $8, %esp
-; I32-NEXT: movl 8(%ebp), %eax
-; I32-NEXT: orl 12(%ebp), %eax
-; I32-NEXT: je .LBB1_1
-; I32-NEXT: # %bb.2:
-; I32-NEXT: leal 24(%ebp), %eax
-; I32-NEXT: jmp .LBB1_3
-; I32-NEXT: .LBB1_1:
-; I32-NEXT: leal 16(%ebp), %eax
-; I32-NEXT: .LBB1_3:
-; I32-NEXT: movq (%eax), %mm0
-; I32-NEXT: psllw %mm0, %mm0
-; I32-NEXT: movq %mm0, (%esp)
-; I32-NEXT: movl (%esp), %eax
-; I32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; I32-NEXT: movl %ebp, %esp
-; I32-NEXT: popl %ebp
-; I32-NEXT: .cfi_def_cfa %esp, 4
-; I32-NEXT: retl
+; X86-LABEL: test49:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: orl 12(%ebp), %eax
+; X86-NEXT: je .LBB1_1
+; X86-NEXT: # %bb.2:
+; X86-NEXT: leal 24(%ebp), %eax
+; X86-NEXT: jmp .LBB1_3
+; X86-NEXT: .LBB1_1:
+; X86-NEXT: leal 16(%ebp), %eax
+; X86-NEXT: .LBB1_3:
+; X86-NEXT: movq (%eax), %mm0
+; X86-NEXT: psllw %mm0, %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl
%cond = icmp eq i64 %arg, 0
%xmmx = bitcast i64 %x to x86_mmx
%ymmx = bitcast i64 %y to x86_mmx