[llvm] 8a074c8 - [X86] fixup-bw-copy.ll - replace X32 check prefixes with X86
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 29 06:23:30 PST 2024
Author: Simon Pilgrim
Date: 2024-01-29T14:23:08Z
New Revision: 8a074c84ff01f8581b4a769c4af3abab9731fe5b
URL: https://github.com/llvm/llvm-project/commit/8a074c84ff01f8581b4a769c4af3abab9731fe5b
DIFF: https://github.com/llvm/llvm-project/commit/8a074c84ff01f8581b4a769c4af3abab9731fe5b.diff
LOG: [X86] fixup-bw-copy.ll - replace X32 check prefixes with X86
We try to use X32 only for gnux32 triple tests.
Added:
Modified:
llvm/test/CodeGen/X86/fixup-bw-copy.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/fixup-bw-copy.ll b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
index 73907d336b194b3..2af90469f4cce9a 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-copy.ll
+++ b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 %s
; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=x86_64-- < %s | FileCheck --check-prefix=X64 %s
-; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=i386-- < %s | FileCheck --check-prefixes=X32,BWON32 %s
-; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=i386-- < %s | FileCheck --check-prefixes=X32,BWOFF32 %s
+; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=1 -mtriple=i386-- < %s | FileCheck --check-prefixes=X86,X86-BWON %s
+; RUN: llc -verify-machineinstrs -fixup-byte-word-insts=0 -mtriple=i386-- < %s | FileCheck --check-prefixes=X86,X86-BWOFF %s
target datalayout = "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128"
@@ -13,15 +13,15 @@ define i8 @test_movb(i8 %a0) nounwind {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; BWON32-LABEL: test_movb:
-; BWON32: # %bb.0:
-; BWON32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; BWON32-NEXT: retl
+; X86-BWON-LABEL: test_movb:
+; X86-BWON: # %bb.0:
+; X86-BWON-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-BWON-NEXT: retl
;
-; BWOFF32-LABEL: test_movb:
-; BWOFF32: # %bb.0:
-; BWOFF32-NEXT: movb {{[0-9]+}}(%esp), %al
-; BWOFF32-NEXT: retl
+; X86-BWOFF-LABEL: test_movb:
+; X86-BWOFF: # %bb.0:
+; X86-BWOFF-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-BWOFF-NEXT: retl
ret i8 %a0
}
@@ -32,10 +32,10 @@ define i8 @test_movb_Os(i8 %a0) nounwind optsize {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-LABEL: test_movb_Os:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: retl
+; X86-LABEL: test_movb_Os:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
ret i8 %a0
}
@@ -46,10 +46,10 @@ define i8 @test_movb_Oz(i8 %a0) nounwind minsize {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-LABEL: test_movb_Oz:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: retl
+; X86-LABEL: test_movb_Oz:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: retl
ret i8 %a0
}
@@ -60,15 +60,15 @@ define i16 @test_movw(i16 %a0) {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; BWON32-LABEL: test_movw:
-; BWON32: # %bb.0:
-; BWON32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; BWON32-NEXT: retl
+; X86-BWON-LABEL: test_movw:
+; X86-BWON: # %bb.0:
+; X86-BWON-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-BWON-NEXT: retl
;
-; BWOFF32-LABEL: test_movw:
-; BWOFF32: # %bb.0:
-; BWOFF32-NEXT: movw {{[0-9]+}}(%esp), %ax
-; BWOFF32-NEXT: retl
+; X86-BWOFF-LABEL: test_movw:
+; X86-BWOFF: # %bb.0:
+; X86-BWOFF-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X86-BWOFF-NEXT: retl
ret i16 %a0
}
@@ -83,12 +83,12 @@ define i8 @test_movb_hreg(i16 %a0) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-LABEL: test_movb_hreg:
-; X32: # %bb.0:
-; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addb %al, %ah
-; X32-NEXT: movb %ah, %al
-; X32-NEXT: retl
+; X86-LABEL: test_movb_hreg:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addb %al, %ah
+; X86-NEXT: movb %ah, %al
+; X86-NEXT: retl
%tmp0 = trunc i16 %a0 to i8
%tmp1 = lshr i16 %a0, 8
%tmp2 = trunc i16 %tmp1 to i8
More information about the llvm-commits
mailing list