[llvm] b697b80 - [X86] freeze-vector.ll - fix cut+pasta typo in frozen build vector tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 1 09:37:06 PDT 2025
Author: Simon Pilgrim
Date: 2025-07-01T17:36:43+01:00
New Revision: b697b801b13d94aa88f19a9f7dc7197b1f658186
URL: https://github.com/llvm/llvm-project/commit/b697b801b13d94aa88f19a9f7dc7197b1f658186
DIFF: https://github.com/llvm/llvm-project/commit/b697b801b13d94aa88f19a9f7dc7197b1f658186.diff
LOG: [X86] freeze-vector.ll - fix cut+pasta typo in frozen build vector tests
Ensure we load BOTH scalars and insert them at different positions into separate vectors with the freeze(poison) base
Noticed while triaging regressions in #145939
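The typo had the same shape in all three affected tests: the second AND reused %i0.src, leaving the %i1.src load dead, so both build vectors were fed from a single scalar. A minimal sketch of the corrected pattern for freeze_two_frozen_buildvectors (the other two tests differ mainly in freeze placement and element type):

    %i0.src = load i32, ptr %origin0
    %i0 = and i32 %i0.src, 15
    %i1.src = load i32, ptr %origin1
    %i1 = and i32 %i1.src, 15        ; was: and i32 %i0.src, 15
    %i2 = insertelement <4 x i32> poison, i32 %i0, i64 1
    ; ... %i1 is inserted into a separate poison build vector at a different
    ; lane, and each vector is and'ed with the <7,...> splat and frozen
    ; before being stored

With both loads live, the updated CHECK lines expect two distinct scalars to be materialized (note the extra load via %esi/%rsi in the new assembly) instead of one register being reused for both stores.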
Added:
Modified:
llvm/test/CodeGen/X86/freeze-vector.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll
index 362b3b945f962..953a5e7285fe4 100644
--- a/llvm/test/CodeGen/X86/freeze-vector.ll
+++ b/llvm/test/CodeGen/X86/freeze-vector.ll
@@ -342,42 +342,50 @@ define void @freeze_buildvector_single_repeated_maybe_poison_operand(ptr %origin
define void @freeze_two_frozen_buildvectors(ptr %origin0, ptr %origin1, ptr %dst0, ptr %dst1) nounwind {
; X86-LABEL: freeze_two_frozen_buildvectors:
; X86: # %bb.0:
+; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %esi
+; X86-NEXT: andl $15, %esi
; X86-NEXT: movl (%edx), %edx
; X86-NEXT: andl $15, %edx
-; X86-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
-; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [7,7,7,7]
-; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovd %esi, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; X86-NEXT: vbroadcastss {{.*#+}} xmm2 = [7,7,7,7]
+; X86-NEXT: vpand %xmm2, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%ecx)
; X86-NEXT: vmovd %edx, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
-; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
+; X86-NEXT: vpand %xmm2, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: freeze_two_frozen_buildvectors:
; X64: # %bb.0:
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: andl $15, %eax
-; X64-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
-; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: movl (%rsi), %eax
+; X64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vpbroadcastd %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; X64-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7]
+; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
; X64-NEXT: vmovdqa %xmm0, (%rdx)
; X64-NEXT: vmovd %eax, %xmm0
; X64-NEXT: vpbroadcastd %xmm0, %xmm0
-; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; X64-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
-; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
+; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
; X64-NEXT: vmovdqa %xmm0, (%rcx)
; X64-NEXT: retq
%i0.src = load i32, ptr %origin0
%i0 = and i32 %i0.src, 15
%i1.src = load i32, ptr %origin1
- %i1 = and i32 %i0.src, 15
+ %i1 = and i32 %i1.src, 15
%i2 = insertelement <4 x i32> poison, i32 %i0, i64 1
%i3 = and <4 x i32> %i2, <i32 7, i32 7, i32 7, i32 7>
%i4 = freeze <4 x i32> %i3
@@ -392,41 +400,43 @@ define void @freeze_two_frozen_buildvectors(ptr %origin0, ptr %origin1, ptr %dst
define void @freeze_two_buildvectors_only_one_frozen(ptr %origin0, ptr %origin1, ptr %dst0, ptr %dst1) nounwind {
; X86-LABEL: freeze_two_buildvectors_only_one_frozen:
; X86: # %bb.0:
+; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl (%edx), %edx
-; X86-NEXT: andl $15, %edx
-; X86-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; X86-NEXT: vmovd %edx, %xmm1
-; X86-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
-; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5,6,7]
-; X86-NEXT: vbroadcastss {{.*#+}} xmm2 = [7,7,7,7]
-; X86-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %esi
+; X86-NEXT: andl $15, %esi
+; X86-NEXT: vmovd %esi, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [7,7,7,7]
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vbroadcastss (%edx), %xmm2
; X86-NEXT: vmovdqa %xmm0, (%ecx)
-; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
-; X86-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X86-NEXT: vpand %xmm1, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: freeze_two_buildvectors_only_one_frozen:
; X64: # %bb.0:
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: andl $15, %eax
-; X64-NEXT: vpxor %xmm0, %xmm0, %xmm0
-; X64-NEXT: vmovd %eax, %xmm1
-; X64-NEXT: vpbroadcastd %xmm1, %xmm1
-; X64-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; X64-NEXT: vpbroadcastd {{.*#+}} xmm2 = [7,7,7,7]
-; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
-; X64-NEXT: vmovdqa %xmm0, (%rdx)
-; X64-NEXT: vpand %xmm2, %xmm1, %xmm0
-; X64-NEXT: vmovdqa %xmm0, (%rcx)
+; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; X64-NEXT: vbroadcastss {{.*#+}} xmm1 = [7,7,7,7]
+; X64-NEXT: vandps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vbroadcastss (%rsi), %xmm2
+; X64-NEXT: vmovaps %xmm0, (%rdx)
+; X64-NEXT: vandps %xmm1, %xmm2, %xmm0
+; X64-NEXT: vmovaps %xmm0, (%rcx)
; X64-NEXT: retq
%i0.src = load i32, ptr %origin0
%i0 = and i32 %i0.src, 15
%i1.src = load i32, ptr %origin1
- %i1 = and i32 %i0.src, 15
+ %i1 = and i32 %i1.src, 15
%i2 = insertelement <4 x i32> poison, i32 %i0, i64 1
%i3 = and <4 x i32> %i2, <i32 7, i32 7, i32 7, i32 7>
%i4 = freeze <4 x i32> %i3
@@ -440,34 +450,40 @@ define void @freeze_two_buildvectors_only_one_frozen(ptr %origin0, ptr %origin1,
define void @freeze_two_buildvectors_one_undef_elt(ptr %origin0, ptr %origin1, ptr %dst0, ptr %dst1) nounwind {
; X86-LABEL: freeze_two_buildvectors_one_undef_elt:
; X86: # %bb.0:
+; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl (%edx), %edx
-; X86-NEXT: andl $15, %edx
-; X86-NEXT: vmovddup {{.*#+}} xmm0 = [7,0,7,0]
-; X86-NEXT: # xmm0 = mem[0,0]
-; X86-NEXT: vmovd %edx, %xmm1
-; X86-NEXT: vpand %xmm0, %xmm1, %xmm2
-; X86-NEXT: vmovdqa %xmm2, (%ecx)
-; X86-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
-; X86-NEXT: vpand %xmm0, %xmm1, %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %esi
+; X86-NEXT: andl $15, %esi
+; X86-NEXT: vmovd %esi, %xmm0
+; X86-NEXT: vmovddup {{.*#+}} xmm1 = [7,0,7,0]
+; X86-NEXT: # xmm1 = mem[0,0]
+; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
+; X86-NEXT: vmovdqa %xmm0, (%ecx)
+; X86-NEXT: vpand %xmm1, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
+; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: freeze_two_buildvectors_one_undef_elt:
; X64: # %bb.0:
; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: andl $15, %eax
; X64-NEXT: vmovd %eax, %xmm0
-; X64-NEXT: vpbroadcastd %xmm0, %xmm0
-; X64-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpmovsxbq {{.*#+}} xmm1 = [7,7]
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpbroadcastq (%rsi), %xmm2
; X64-NEXT: vmovdqa %xmm0, (%rdx)
+; X64-NEXT: vpand %xmm1, %xmm2, %xmm0
; X64-NEXT: vmovdqa %xmm0, (%rcx)
; X64-NEXT: retq
%i0.src = load i64, ptr %origin0
%i0 = and i64 %i0.src, 15
%i1.src = load i64, ptr %origin1
- %i1 = and i64 %i0.src, 15
+ %i1 = and i64 %i1.src, 15
%i2 = insertelement <2 x i64> poison, i64 %i0, i64 0
%i3 = and <2 x i64> %i2, <i64 7, i64 7>
%i4 = freeze <2 x i64> %i3