[llvm] [X86] Add EltsFromConsecutiveLoads test for infinite loop if we match reverse(vzload(ptr)) patterns (PR #170889)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 5 09:32:11 PST 2025


https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/170889

This was fixed by #170852 - previously we'd get stuck in a loop: shuffle(movq(p),undef,1,0) -> build_vector(0, p[0]) -> shuffle(movq(p),undef,1,0) -> .....
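
For reference, below is a minimal standalone C++ sketch (not LLVM code - the Form enum and step() function are hypothetical stand-ins for the real SDNode rewrites) of how two combines that invert each other can keep oscillating between the two node forms, and how refusing one direction, as #170852 does for reverse(vzload) patterns, lets the worklist reach a fixed point instead:

// Minimal sketch of the ping-pong between the shuffle and build_vector
// combines. Form and step() are illustrative only, not actual LLVM APIs.
#include <cstdio>

enum class Form { ShuffleOfVZLoad, BuildVectorZeroLoad };

// One simplification step on the current node form.
static Form step(Form F, bool AllowReverseVZLoad) {
  switch (F) {
  case Form::ShuffleOfVZLoad:
    // shuffle(movq(p),undef,1,0) -> build_vector(0, p[0])
    return Form::BuildVectorZeroLoad;
  case Form::BuildVectorZeroLoad:
    // The load-merging combine would turn this back into a reversed
    // shuffle(movq(p)) unless that direction is refused.
    return AllowReverseVZLoad ? Form::ShuffleOfVZLoad : F;
  }
  return F;
}

int main() {
  const bool Opts[] = {true, false};
  for (bool Allow : Opts) {
    std::printf("AllowReverseVZLoad=%d\n", Allow);
    Form N = Form::ShuffleOfVZLoad;
    for (int I = 0; I < 6; ++I) {
      Form Next = step(N, Allow);
      if (Next == N) {
        std::printf("  fixed point after %d step(s)\n", I);
        break;
      }
      std::printf("  step %d -> %s\n", I,
                  Next == Form::ShuffleOfVZLoad ? "shuffle(vzload)"
                                                : "build_vector");
      N = Next;
    }
  }
  return 0;
}

With AllowReverseVZLoad=true the two rewrites undo each other and the loop alternates forever (bounded here only by the iteration cap); with it disabled the second rewrite is a no-op and the process settles after one step.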

From 125680eee34415a56158f1ca9869932cc140cf57 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 5 Dec 2025 17:31:20 +0000
Subject: [PATCH] [X86] Add EltsFromConsecutiveLoads test for infinite loop if
 we match reverse(vzload(ptr)) patterns

This was fixed by #170852 - previously we'd get stuck in a loop: shuffle(movq(p),undef,1,0) -> build_vector(0, p[0]) -> shuffle(movq(p),undef,1,0) -> .....
---
 .../X86/merge-consecutive-loads-128.ll        | 93 ++++++++++++++++++-
 1 file changed, 90 insertions(+), 3 deletions(-)

diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index b6aae486dc315..d8be4cfd27238 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512
 ;
 ; 32-bit SSE tests to make sure we do reasonable things.
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=X86-SSE,X86-SSE1
@@ -1637,3 +1637,90 @@ define <4 x i32> @load_i32_zext_i128_v4i32(ptr %ptr) {
   %3 = bitcast i128 %2 to <4 x i32>
   ret <4 x i32> %3
 }
+
+; Don't attempt to reverse a partial VZEXT_LOAD
+define <4 x i32> @no_reverse_vzload(ptr %p0) nounwind {
+; SSE2-LABEL: no_reverse_vzload:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    paddd %xmm1, %xmm1
+; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: no_reverse_vzload:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    paddd %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: no_reverse_vzload:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: no_reverse_vzload:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: no_reverse_vzload:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
+; X86-SSE1-LABEL: no_reverse_vzload:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %ebx
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE1-NEXT:    xorl %ecx, %ecx
+; X86-SSE1-NEXT:    cmpl $0, (%edx)
+; X86-SSE1-NEXT:    setg %cl
+; X86-SSE1-NEXT:    negl %ecx
+; X86-SSE1-NEXT:    xorl %ebx, %ebx
+; X86-SSE1-NEXT:    cmpl $0, 4(%edx)
+; X86-SSE1-NEXT:    setg %bl
+; X86-SSE1-NEXT:    negl %ebx
+; X86-SSE1-NEXT:    movl %ebx, 4(%eax)
+; X86-SSE1-NEXT:    movl %ecx, (%eax)
+; X86-SSE1-NEXT:    movl $0, 12(%eax)
+; X86-SSE1-NEXT:    movl $0, 8(%eax)
+; X86-SSE1-NEXT:    popl %ebx
+; X86-SSE1-NEXT:    retl $4
+;
+; X86-SSE41-LABEL: no_reverse_vzload:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT:    movddup {{.*#+}} xmm1 = xmm0[0,0]
+; X86-SSE41-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE41-NEXT:    paddd %xmm1, %xmm1
+; X86-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X86-SSE41-NEXT:    pcmpgtd %xmm1, %xmm0
+; X86-SSE41-NEXT:    retl
+  %i0 = load <2 x i32>, ptr %p0, align 4
+  %i1 = shufflevector <2 x i32> %i0, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+  %i2 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %i1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %i3 = shl <4 x i32> %i2, <i32 4, i32 4, i32 1, i32 1>
+  %i4 = shufflevector <4 x i32> %i1, <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+  %i5 = icmp slt <4 x i32> %i3, %i4
+  %i6 = sext <4 x i1> %i5 to <4 x i32>
+  ret <4 x i32> %i6
+}
