[llvm] [X86] Allow handling of i128/256/512 AND/OR/XOR bitlogic on the FPU (PR #171616)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 11 01:06:59 PST 2025


================
@@ -5679,29 +5679,56 @@ define void @vec512_v2f64(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 }
 
 define void @vec512_v2i128(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.ptr) nounwind {
-; ALL-LABEL: vec512_v2i128:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq 16(%rdi), %rax
-; ALL-NEXT:    movq 24(%rdi), %rcx
-; ALL-NEXT:    movq (%rdi), %r8
-; ALL-NEXT:    movq 8(%rdi), %rdi
-; ALL-NEXT:    notq %rdi
-; ALL-NEXT:    notq %r8
-; ALL-NEXT:    notq %rcx
-; ALL-NEXT:    notq %rax
-; ALL-NEXT:    movq %rax, 16(%rsi)
-; ALL-NEXT:    movq %rcx, 24(%rsi)
-; ALL-NEXT:    movq %r8, (%rsi)
-; ALL-NEXT:    movq %rdi, 8(%rsi)
-; ALL-NEXT:    movq %rax, 16(%rdx)
-; ALL-NEXT:    movq %rcx, 24(%rdx)
-; ALL-NEXT:    movq %r8, (%rdx)
-; ALL-NEXT:    movq %rdi, 8(%rdx)
-; ALL-NEXT:    movq %rax, 48(%rdx)
-; ALL-NEXT:    movq %rcx, 56(%rdx)
-; ALL-NEXT:    movq %r8, 32(%rdx)
-; ALL-NEXT:    movq %rdi, 40(%rdx)
-; ALL-NEXT:    retq
+; SCALAR-LABEL: vec512_v2i128:
+; SCALAR:       # %bb.0:
+; SCALAR-NEXT:    movq 16(%rdi), %rax
+; SCALAR-NEXT:    movq 24(%rdi), %rcx
+; SCALAR-NEXT:    movq (%rdi), %r8
+; SCALAR-NEXT:    movq 8(%rdi), %rdi
+; SCALAR-NEXT:    notq %rdi
+; SCALAR-NEXT:    notq %r8
+; SCALAR-NEXT:    notq %rcx
+; SCALAR-NEXT:    notq %rax
+; SCALAR-NEXT:    movq %rax, 16(%rsi)
+; SCALAR-NEXT:    movq %rcx, 24(%rsi)
+; SCALAR-NEXT:    movq %r8, (%rsi)
+; SCALAR-NEXT:    movq %rdi, 8(%rsi)
+; SCALAR-NEXT:    movq %rax, 16(%rdx)
+; SCALAR-NEXT:    movq %rcx, 24(%rdx)
+; SCALAR-NEXT:    movq %r8, (%rdx)
+; SCALAR-NEXT:    movq %rdi, 8(%rdx)
+; SCALAR-NEXT:    movq %rax, 48(%rdx)
+; SCALAR-NEXT:    movq %rcx, 56(%rdx)
+; SCALAR-NEXT:    movq %r8, 32(%rdx)
+; SCALAR-NEXT:    movq %rdi, 40(%rdx)
+; SCALAR-NEXT:    retq
+;
+; SSE2-LABEL: vec512_v2i128:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm1
+; SSE2-NEXT:    pxor 16(%rdi), %xmm0
+; SSE2-NEXT:    movdqa %xmm0, 16(%rsi)
+; SSE2-NEXT:    movdqa %xmm1, (%rsi)
+; SSE2-NEXT:    movdqa %xmm0, 16(%rdx)
+; SSE2-NEXT:    movdqa %xmm1, (%rdx)
+; SSE2-NEXT:    movdqa %xmm0, 48(%rdx)
+; SSE2-NEXT:    movdqa %xmm1, 32(%rdx)
+; SSE2-NEXT:    retq
+;
+; AVX-LABEL: vec512_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpxor (%rdi), %xmm0, %xmm1
+; AVX-NEXT:    vpxor 16(%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm0, 16(%rsi)
+; AVX-NEXT:    vmovdqa %xmm1, (%rsi)
+; AVX-NEXT:    vmovdqa %xmm0, 16(%rdx)
+; AVX-NEXT:    vmovdqa %xmm1, (%rdx)
+; AVX-NEXT:    vmovdqa %xmm0, 48(%rdx)
+; AVX-NEXT:    vmovdqa %xmm1, 32(%rdx)
----------------
RKSimon wrote:

It'd involve adding a subvector splat, which store combining won't attempt - e.g. the ymm form sketched below.
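
For illustration, the splat form would look roughly like the following (a hand-written sketch, not actual codegen output; vinsertf128 is chosen to stay AVX1-compatible):

    vpcmpeqd %xmm0, %xmm0, %xmm0          # all-ones constant for the NOT
    vpxor (%rdi), %xmm0, %xmm1            # NOT of the low 16 bytes
    vpxor 16(%rdi), %xmm0, %xmm0          # NOT of the high 16 bytes
    vmovdqa %xmm0, 16(%rsi)
    vmovdqa %xmm1, (%rsi)
    vinsertf128 $1, %xmm0, %ymm1, %ymm1   # splat: concatenate both halves into one 32-byte value
    vmovdqa %ymm1, (%rdx)                 # two 32-byte stores...
    vmovdqa %ymm1, 32(%rdx)               # ...instead of four 16-byte stores
    vzeroupper
    retq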

https://github.com/llvm/llvm-project/pull/171616

