[llvm] [X86][AVX] Fix handling of out-of-bounds shift amounts in AVX2 vector logical shift nodes #83840 (PR #86922)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 8 04:21:25 PDT 2024
================
@@ -606,3 +606,119 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
%3 = lshr <4 x i32> %x, %2
ret <4 x i32> %3
}
+
+define <4 x i32> @combine_vec_lshr_clamped1(<4 x i32> %sh, <4 x i32> %amt) {
+; SSE2-LABEL: combine_vec_lshr_clamped1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrld %xmm3, %xmm5
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm0[0,3]
+; SSE2-NEXT: pandn %xmm5, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_lshr_clamped1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm2 = [31,31,31,31]
+; SSE41-NEXT: pminud %xmm1, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrld %xmm5, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm1, %xmm4
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: combine_vec_lshr_clamped1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cmp.i = icmp ult <4 x i32> %amt, <i32 32, i32 32, i32 32, i32 32>
----------------
RKSimon wrote:
Add variants of the icmp with commuted select operands (icmp uge).
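
For reference, a minimal sketch of what such a commuted variant could look like, following the pattern of the clamped test above (the function name and exact select structure here are illustrative assumptions, not taken from the PR):

define <4 x i32> @combine_vec_lshr_commuted_clamped(<4 x i32> %sh, <4 x i32> %amt) {
  ; Illustrative sketch: inverted predicate (uge instead of ult) with the
  ; select operands swapped, so out-of-range lanes still select zero.
  ; Since AVX2's vpsrlvd already yields zero for shift amounts >= 32, the
  ; same single-instruction fold should still apply to this form.
  %cmp.i = icmp uge <4 x i32> %amt, <i32 32, i32 32, i32 32, i32 32>
  %shr = lshr <4 x i32> %sh, %amt
  %1 = select <4 x i1> %cmp.i, <4 x i32> zeroinitializer, <4 x i32> %shr
  ret <4 x i32> %1
}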
https://github.com/llvm/llvm-project/pull/86922