[llvm] 6432658 - [X86] Replace X32 test check prefixes with X86
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed May 31 05:06:57 PDT 2023
Author: Simon Pilgrim
Date: 2023-05-31T13:01:50+01:00
New Revision: 6432658825ae35f0289bd10dfdf9614fc4e563ce
URL: https://github.com/llvm/llvm-project/commit/6432658825ae35f0289bd10dfdf9614fc4e563ce
DIFF: https://github.com/llvm/llvm-project/commit/6432658825ae35f0289bd10dfdf9614fc4e563ce.diff
LOG: [X86] Replace X32 test check prefixes with X86
We try to use the X32 prefix only for gnux32 triple test cases.
Added:
Modified:
llvm/test/CodeGen/X86/uint_to_fp-3.ll
llvm/test/CodeGen/X86/uint_to_fp.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/uint_to_fp-3.ll b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
index e539d66b5bdd..93a573d5fecc 100644
--- a/llvm/test/CodeGen/X86/uint_to_fp-3.ll
+++ b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
@@ -1,23 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
;PR29079
define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
-; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: mask_ucvt_4i32_4f32:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; X86-SSE-NEXT: retl
;
-; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: mask_ucvt_4i32_4f32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f32:
; X64-SSE: # %bb.0:
@@ -36,20 +36,20 @@ define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
}
define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
-; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm1
-; X32-SSE-NEXT: movaps %xmm2, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: mask_ucvt_4i32_4f64:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm1
+; X86-SSE-NEXT: movaps %xmm2, %xmm0
+; X86-SSE-NEXT: retl
;
-; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: mask_ucvt_4i32_4f64:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f64:
; X64-SSE: # %bb.0:
@@ -72,26 +72,26 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; Regression noticed in D56387
define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(ptr%p0) {
-; X32-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movups (%eax), %xmm0
-; X32-SSE-NEXT: movups 16(%eax), %xmm1
-; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X32-SSE-NEXT: psrld $16, %xmm0
-; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; X32-SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-SSE-NEXT: retl
+; X86-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: movups (%eax), %xmm0
+; X86-SSE-NEXT: movups 16(%eax), %xmm1
+; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X86-SSE-NEXT: psrld $16, %xmm0
+; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
+; X86-SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: retl
;
-; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovups (%eax), %xmm0
-; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
-; X32-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
-; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-AVX-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovups (%eax), %xmm0
+; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
+; X86-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
; X64-SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/uint_to_fp.ll b/llvm/test/CodeGen/X86/uint_to_fp.ll
index 97a739b53e8c..d8e0b61ed199 100644
--- a/llvm/test/CodeGen/X86/uint_to_fp.ll
+++ b/llvm/test/CodeGen/X86/uint_to_fp.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-apple-darwin8 -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-apple-darwin8 -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-apple-darwin8 -mattr=+sse2 | FileCheck %s --check-prefix=X64
; rdar://6034396
define void @test(i32 %x, ptr %y) nounwind {
-; X32-LABEL: test:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shrl $23, %ecx
-; X32-NEXT: cvtsi2ss %ecx, %xmm0
-; X32-NEXT: movss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrl $23, %ecx
+; X86-NEXT: cvtsi2ss %ecx, %xmm0
+; X86-NEXT: movss %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test:
; X64: ## %bb.0: ## %entry
More information about the llvm-commits
mailing list