[llvm] 197bfa9 - [X86] Add tests showing potential for vectorising i128/256/512 select nodes (#180173)

via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 6 04:17:07 PST 2026


Author: Simon Pilgrim
Date: 2026-02-06T12:17:01Z
New Revision: 197bfa9a74d229bb5c5574ea50d8cc30382c4a2d

URL: https://github.com/llvm/llvm-project/commit/197bfa9a74d229bb5c5574ea50d8cc30382c4a2d
DIFF: https://github.com/llvm/llvm-project/commit/197bfa9a74d229bb5c5574ea50d8cc30382c4a2d.diff

LOG: [X86] Add tests showing potential for vectorising i128/256/512 select nodes (#180173)

Added: 
    llvm/test/CodeGen/X86/select-big-integer.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/select-big-integer.ll b/llvm/test/CodeGen/X86/select-big-integer.ll
new file mode 100644
index 0000000000000..292e063e97aa9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/select-big-integer.ll
@@ -0,0 +1,730 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64    | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512,AVX512VL
+
+;
+; LOGIC
+;
+
+define void @test_not_i128(ptr %p0, ptr %p1, i1 zeroext %a2, ptr %p3) nounwind {
+; SSE2-LABEL: test_not_i128:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    testl %edx, %edx
+; SSE2-NEXT:    je .LBB0_1
+; SSE2-NEXT:  # %bb.2:
+; SSE2-NEXT:    movq 8(%rdi), %rax
+; SSE2-NEXT:    movq (%rdi), %rdx
+; SSE2-NEXT:    jmp .LBB0_3
+; SSE2-NEXT:  .LBB0_1:
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    pxor (%rsi), %xmm0
+; SSE2-NEXT:    movq %xmm0, %rdx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT:    movq %xmm0, %rax
+; SSE2-NEXT:  .LBB0_3:
+; SSE2-NEXT:    movq %rdx, (%rcx)
+; SSE2-NEXT:    movq %rax, 8(%rcx)
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: test_not_i128:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    testl %edx, %edx
+; SSE4-NEXT:    je .LBB0_1
+; SSE4-NEXT:  # %bb.2:
+; SSE4-NEXT:    movq 8(%rdi), %rax
+; SSE4-NEXT:    movq (%rdi), %rdx
+; SSE4-NEXT:    jmp .LBB0_3
+; SSE4-NEXT:  .LBB0_1:
+; SSE4-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE4-NEXT:    pxor (%rsi), %xmm0
+; SSE4-NEXT:    movq %xmm0, %rdx
+; SSE4-NEXT:    pextrq $1, %xmm0, %rax
+; SSE4-NEXT:  .LBB0_3:
+; SSE4-NEXT:    movq %rdx, (%rcx)
+; SSE4-NEXT:    movq %rax, 8(%rcx)
+; SSE4-NEXT:    retq
+;
+; AVX-LABEL: test_not_i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    testl %edx, %edx
+; AVX-NEXT:    je .LBB0_1
+; AVX-NEXT:  # %bb.2:
+; AVX-NEXT:    movq 8(%rdi), %rax
+; AVX-NEXT:    movq (%rdi), %rdx
+; AVX-NEXT:    jmp .LBB0_3
+; AVX-NEXT:  .LBB0_1:
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpxor (%rsi), %xmm0, %xmm0
+; AVX-NEXT:    vmovq %xmm0, %rdx
+; AVX-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX-NEXT:  .LBB0_3:
+; AVX-NEXT:    movq %rdx, (%rcx)
+; AVX-NEXT:    movq %rax, 8(%rcx)
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: test_not_i128:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    testl %edx, %edx
+; AVX512-NEXT:    je .LBB0_1
+; AVX512-NEXT:  # %bb.2:
+; AVX512-NEXT:    movq 8(%rdi), %rax
+; AVX512-NEXT:    movq (%rdi), %rdx
+; AVX512-NEXT:    jmp .LBB0_3
+; AVX512-NEXT:  .LBB0_1:
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor (%rsi), %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rdx
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:  .LBB0_3:
+; AVX512-NEXT:    movq %rdx, (%rcx)
+; AVX512-NEXT:    movq %rax, 8(%rcx)
+; AVX512-NEXT:    retq
+  %ld0 = load i128, ptr %p0
+  %ld1 = load i128, ptr %p1
+  %neg1 = xor i128 %ld1, -1
+  %sel = select i1 %a2, i128 %ld0, i128 %neg1
+  store i128 %sel, ptr %p3
+  ret void
+}
+
+define void @test_not_i256(ptr %p0, ptr %p1, i1 zeroext %a2, ptr %p3) nounwind {
+; SSE2-LABEL: test_not_i256:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    testl %edx, %edx
+; SSE2-NEXT:    je .LBB1_1
+; SSE2-NEXT:  # %bb.2:
+; SSE2-NEXT:    movq 8(%rdi), %rdx
+; SSE2-NEXT:    movq 24(%rdi), %rsi
+; SSE2-NEXT:    movq (%rdi), %r8
+; SSE2-NEXT:    movq 16(%rdi), %rax
+; SSE2-NEXT:    jmp .LBB1_3
+; SSE2-NEXT:  .LBB1_1:
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    movdqa 16(%rsi), %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    pxor (%rsi), %xmm0
+; SSE2-NEXT:    movq %xmm0, %r8
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, %rsi
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT:    movq %xmm0, %rdx
+; SSE2-NEXT:  .LBB1_3:
+; SSE2-NEXT:    movq %rax, 16(%rcx)
+; SSE2-NEXT:    movq %r8, (%rcx)
+; SSE2-NEXT:    movq %rsi, 24(%rcx)
+; SSE2-NEXT:    movq %rdx, 8(%rcx)
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: test_not_i256:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    testl %edx, %edx
+; SSE4-NEXT:    je .LBB1_1
+; SSE4-NEXT:  # %bb.2:
+; SSE4-NEXT:    movq 8(%rdi), %rax
+; SSE4-NEXT:    movq (%rdi), %rsi
+; SSE4-NEXT:    movq 16(%rdi), %r8
+; SSE4-NEXT:    movq 24(%rdi), %rdx
+; SSE4-NEXT:    jmp .LBB1_3
+; SSE4-NEXT:  .LBB1_1:
+; SSE4-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE4-NEXT:    movdqa 16(%rsi), %xmm1
+; SSE4-NEXT:    pxor %xmm0, %xmm1
+; SSE4-NEXT:    pextrq $1, %xmm1, %rdx
+; SSE4-NEXT:    pxor (%rsi), %xmm0
+; SSE4-NEXT:    movq %xmm1, %r8
+; SSE4-NEXT:    movq %xmm0, %rsi
+; SSE4-NEXT:    pextrq $1, %xmm0, %rax
+; SSE4-NEXT:  .LBB1_3:
+; SSE4-NEXT:    movq %rdx, 24(%rcx)
+; SSE4-NEXT:    movq %r8, 16(%rcx)
+; SSE4-NEXT:    movq %rsi, (%rcx)
+; SSE4-NEXT:    movq %rax, 8(%rcx)
+; SSE4-NEXT:    retq
+;
+; AVX1-LABEL: test_not_i256:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    testl %edx, %edx
+; AVX1-NEXT:    je .LBB1_1
+; AVX1-NEXT:  # %bb.2:
+; AVX1-NEXT:    movq 24(%rdi), %rax
+; AVX1-NEXT:    movq (%rdi), %rdx
+; AVX1-NEXT:    movq 8(%rdi), %rsi
+; AVX1-NEXT:    movq 16(%rdi), %rdi
+; AVX1-NEXT:    jmp .LBB1_3
+; AVX1-NEXT:  .LBB1_1:
+; AVX1-NEXT:    vmovaps (%rsi), %xmm0
+; AVX1-NEXT:    vinsertf128 $1, 16(%rsi), %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT:    vmovq %xmm1, %rdi
+; AVX1-NEXT:    vmovq %xmm0, %rdx
+; AVX1-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX1-NEXT:  .LBB1_3:
+; AVX1-NEXT:    movq %rdi, 16(%rcx)
+; AVX1-NEXT:    movq %rsi, 8(%rcx)
+; AVX1-NEXT:    movq %rdx, (%rcx)
+; AVX1-NEXT:    movq %rax, 24(%rcx)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test_not_i256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    testl %edx, %edx
+; AVX2-NEXT:    je .LBB1_1
+; AVX2-NEXT:  # %bb.2:
+; AVX2-NEXT:    movq 24(%rdi), %rax
+; AVX2-NEXT:    movq 16(%rdi), %rdx
+; AVX2-NEXT:    movq (%rdi), %rsi
+; AVX2-NEXT:    movq 8(%rdi), %rdi
+; AVX2-NEXT:    jmp .LBB1_3
+; AVX2-NEXT:  .LBB1_1:
+; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor (%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rdi
+; AVX2-NEXT:    vmovq %xmm0, %rsi
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rdx
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:  .LBB1_3:
+; AVX2-NEXT:    movq %rdi, 8(%rcx)
+; AVX2-NEXT:    movq %rsi, (%rcx)
+; AVX2-NEXT:    movq %rdx, 16(%rcx)
+; AVX2-NEXT:    movq %rax, 24(%rcx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_not_i256:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    testl %edx, %edx
+; AVX512F-NEXT:    je .LBB1_1
+; AVX512F-NEXT:  # %bb.2:
+; AVX512F-NEXT:    movq 24(%rdi), %rax
+; AVX512F-NEXT:    movq 16(%rdi), %rdx
+; AVX512F-NEXT:    movq (%rdi), %rsi
+; AVX512F-NEXT:    movq 8(%rdi), %rdi
+; AVX512F-NEXT:    jmp .LBB1_3
+; AVX512F-NEXT:  .LBB1_1:
+; AVX512F-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %rdi
+; AVX512F-NEXT:    vmovq %xmm0, %rsi
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT:    vmovq %xmm0, %rdx
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT:  .LBB1_3:
+; AVX512F-NEXT:    movq %rdi, 8(%rcx)
+; AVX512F-NEXT:    movq %rsi, (%rcx)
+; AVX512F-NEXT:    movq %rdx, 16(%rcx)
+; AVX512F-NEXT:    movq %rax, 24(%rcx)
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_not_i256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    testl %edx, %edx
+; AVX512VL-NEXT:    je .LBB1_1
+; AVX512VL-NEXT:  # %bb.2:
+; AVX512VL-NEXT:    movq 24(%rdi), %rax
+; AVX512VL-NEXT:    movq 16(%rdi), %rdx
+; AVX512VL-NEXT:    movq (%rdi), %rsi
+; AVX512VL-NEXT:    movq 8(%rdi), %rdi
+; AVX512VL-NEXT:    jmp .LBB1_3
+; AVX512VL-NEXT:  .LBB1_1:
+; AVX512VL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpxor (%rsi), %ymm0, %ymm0
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rdi
+; AVX512VL-NEXT:    vmovq %xmm0, %rsi
+; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-NEXT:    vmovq %xmm0, %rdx
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT:  .LBB1_3:
+; AVX512VL-NEXT:    movq %rdi, 8(%rcx)
+; AVX512VL-NEXT:    movq %rsi, (%rcx)
+; AVX512VL-NEXT:    movq %rdx, 16(%rcx)
+; AVX512VL-NEXT:    movq %rax, 24(%rcx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+  %ld0 = load i256, ptr %p0
+  %ld1 = load i256, ptr %p1
+  %neg1 = xor i256 %ld1, -1
+  %sel = select i1 %a2, i256 %ld0, i256 %neg1
+  store i256 %sel, ptr %p3
+  ret void
+}
+
+define void @test_not_i512(ptr %p0, ptr %p1, i1 zeroext %a2, ptr %p3) nounwind {
+; SSE2-LABEL: test_not_i512:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pushq %rbx
+; SSE2-NEXT:    testl %edx, %edx
+; SSE2-NEXT:    je .LBB2_1
+; SSE2-NEXT:  # %bb.2:
+; SSE2-NEXT:    movq 56(%rdi), %rsi
+; SSE2-NEXT:    movq 40(%rdi), %r9
+; SSE2-NEXT:    movq 24(%rdi), %r10
+; SSE2-NEXT:    movq 8(%rdi), %r11
+; SSE2-NEXT:    movq 48(%rdi), %rbx
+; SSE2-NEXT:    movq 32(%rdi), %r8
+; SSE2-NEXT:    movq 16(%rdi), %rdx
+; SSE2-NEXT:    movq (%rdi), %rax
+; SSE2-NEXT:    jmp .LBB2_3
+; SSE2-NEXT:  .LBB2_1:
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT:    movdqa (%rsi), %xmm1
+; SSE2-NEXT:    pxor %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    movdqa 16(%rsi), %xmm2
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movq %xmm2, %rdx
+; SSE2-NEXT:    movdqa 32(%rsi), %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm3
+; SSE2-NEXT:    movq %xmm3, %r8
+; SSE2-NEXT:    pxor 48(%rsi), %xmm0
+; SSE2-NEXT:    movq %xmm0, %rbx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, %r11
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, %r10
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, %r9
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT:    movq %xmm0, %rsi
+; SSE2-NEXT:  .LBB2_3:
+; SSE2-NEXT:    movq %rax, (%rcx)
+; SSE2-NEXT:    movq %rdx, 16(%rcx)
+; SSE2-NEXT:    movq %r8, 32(%rcx)
+; SSE2-NEXT:    movq %rbx, 48(%rcx)
+; SSE2-NEXT:    movq %r11, 8(%rcx)
+; SSE2-NEXT:    movq %r10, 24(%rcx)
+; SSE2-NEXT:    movq %r9, 40(%rcx)
+; SSE2-NEXT:    movq %rsi, 56(%rcx)
+; SSE2-NEXT:    popq %rbx
+; SSE2-NEXT:    retq
+;
+; SSE4-LABEL: test_not_i512:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pushq %rbx
+; SSE4-NEXT:    testl %edx, %edx
+; SSE4-NEXT:    je .LBB2_1
+; SSE4-NEXT:  # %bb.2:
+; SSE4-NEXT:    movq 56(%rdi), %rsi
+; SSE4-NEXT:    movq 48(%rdi), %r10
+; SSE4-NEXT:    movq 32(%rdi), %rbx
+; SSE4-NEXT:    movq 40(%rdi), %r9
+; SSE4-NEXT:    movq 16(%rdi), %rdx
+; SSE4-NEXT:    movq 24(%rdi), %r8
+; SSE4-NEXT:    movq (%rdi), %r11
+; SSE4-NEXT:    movq 8(%rdi), %rax
+; SSE4-NEXT:    jmp .LBB2_3
+; SSE4-NEXT:  .LBB2_1:
+; SSE4-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE4-NEXT:    movdqa (%rsi), %xmm1
+; SSE4-NEXT:    pxor %xmm0, %xmm1
+; SSE4-NEXT:    pextrq $1, %xmm1, %rax
+; SSE4-NEXT:    movdqa 16(%rsi), %xmm2
+; SSE4-NEXT:    pxor %xmm0, %xmm2
+; SSE4-NEXT:    pextrq $1, %xmm2, %r8
+; SSE4-NEXT:    movq %xmm1, %r11
+; SSE4-NEXT:    movq %xmm2, %rdx
+; SSE4-NEXT:    movdqa 32(%rsi), %xmm1
+; SSE4-NEXT:    pxor %xmm0, %xmm1
+; SSE4-NEXT:    pextrq $1, %xmm1, %r9
+; SSE4-NEXT:    pxor 48(%rsi), %xmm0
+; SSE4-NEXT:    movq %xmm1, %rbx
+; SSE4-NEXT:    movq %xmm0, %r10
+; SSE4-NEXT:    pextrq $1, %xmm0, %rsi
+; SSE4-NEXT:  .LBB2_3:
+; SSE4-NEXT:    movq %rax, 8(%rcx)
+; SSE4-NEXT:    movq %r11, (%rcx)
+; SSE4-NEXT:    movq %r8, 24(%rcx)
+; SSE4-NEXT:    movq %rdx, 16(%rcx)
+; SSE4-NEXT:    movq %r9, 40(%rcx)
+; SSE4-NEXT:    movq %rbx, 32(%rcx)
+; SSE4-NEXT:    movq %r10, 48(%rcx)
+; SSE4-NEXT:    movq %rsi, 56(%rcx)
+; SSE4-NEXT:    popq %rbx
+; SSE4-NEXT:    retq
+;
+; AVX1-LABEL: test_not_i512:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    testl %edx, %edx
+; AVX1-NEXT:    je .LBB2_1
+; AVX1-NEXT:  # %bb.2:
+; AVX1-NEXT:    movq 24(%rdi), %rax
+; AVX1-NEXT:    movq 32(%rdi), %rdx
+; AVX1-NEXT:    movq 40(%rdi), %rsi
+; AVX1-NEXT:    movq 48(%rdi), %r11
+; AVX1-NEXT:    movq 56(%rdi), %r10
+; AVX1-NEXT:    movq (%rdi), %r8
+; AVX1-NEXT:    movq 8(%rdi), %r9
+; AVX1-NEXT:    movq 16(%rdi), %rdi
+; AVX1-NEXT:    jmp .LBB2_3
+; AVX1-NEXT:  .LBB2_1:
+; AVX1-NEXT:    vmovaps (%rsi), %xmm0
+; AVX1-NEXT:    vmovaps 32(%rsi), %xmm1
+; AVX1-NEXT:    vinsertf128 $1, 48(%rsi), %ymm1, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, 16(%rsi), %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; AVX1-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpextrq $1, %xmm0, %r9
+; AVX1-NEXT:    vmovq %xmm3, %rdi
+; AVX1-NEXT:    vmovq %xmm0, %r8
+; AVX1-NEXT:    vxorps %ymm2, %ymm1, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpextrq $1, %xmm1, %r10
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX1-NEXT:    vmovq %xmm1, %r11
+; AVX1-NEXT:    vmovq %xmm0, %rdx
+; AVX1-NEXT:    vpextrq $1, %xmm3, %rax
+; AVX1-NEXT:  .LBB2_3:
+; AVX1-NEXT:    movq %rdi, 16(%rcx)
+; AVX1-NEXT:    movq %r9, 8(%rcx)
+; AVX1-NEXT:    movq %r8, (%rcx)
+; AVX1-NEXT:    movq %r10, 56(%rcx)
+; AVX1-NEXT:    movq %r11, 48(%rcx)
+; AVX1-NEXT:    movq %rsi, 40(%rcx)
+; AVX1-NEXT:    movq %rdx, 32(%rcx)
+; AVX1-NEXT:    movq %rax, 24(%rcx)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test_not_i512:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    testl %edx, %edx
+; AVX2-NEXT:    je .LBB2_1
+; AVX2-NEXT:  # %bb.2:
+; AVX2-NEXT:    movq 24(%rdi), %rax
+; AVX2-NEXT:    movq 16(%rdi), %rdx
+; AVX2-NEXT:    movq 32(%rdi), %rsi
+; AVX2-NEXT:    movq 40(%rdi), %r8
+; AVX2-NEXT:    movq 48(%rdi), %r9
+; AVX2-NEXT:    movq 56(%rdi), %r10
+; AVX2-NEXT:    movq (%rdi), %r11
+; AVX2-NEXT:    movq 8(%rdi), %rdi
+; AVX2-NEXT:    jmp .LBB2_3
+; AVX2-NEXT:  .LBB2_1:
+; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor (%rsi), %ymm0, %ymm1
+; AVX2-NEXT:    vpextrq $1, %xmm1, %rdi
+; AVX2-NEXT:    vpxor 32(%rsi), %ymm0, %ymm0
+; AVX2-NEXT:    vmovq %xmm1, %r11
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpextrq $1, %xmm2, %r10
+; AVX2-NEXT:    vmovq %xmm2, %r9
+; AVX2-NEXT:    vpextrq $1, %xmm0, %r8
+; AVX2-NEXT:    vmovq %xmm0, %rsi
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rdx
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:  .LBB2_3:
+; AVX2-NEXT:    movq %rdi, 8(%rcx)
+; AVX2-NEXT:    movq %r11, (%rcx)
+; AVX2-NEXT:    movq %r10, 56(%rcx)
+; AVX2-NEXT:    movq %r9, 48(%rcx)
+; AVX2-NEXT:    movq %r8, 40(%rcx)
+; AVX2-NEXT:    movq %rsi, 32(%rcx)
+; AVX2-NEXT:    movq %rdx, 16(%rcx)
+; AVX2-NEXT:    movq %rax, 24(%rcx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test_not_i512:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    testl %edx, %edx
+; AVX512F-NEXT:    je .LBB2_1
+; AVX512F-NEXT:  # %bb.2:
+; AVX512F-NEXT:    movq 56(%rdi), %rax
+; AVX512F-NEXT:    movq 48(%rdi), %rdx
+; AVX512F-NEXT:    movq (%rdi), %rsi
+; AVX512F-NEXT:    movq 8(%rdi), %r8
+; AVX512F-NEXT:    movq 16(%rdi), %r9
+; AVX512F-NEXT:    movq 24(%rdi), %r11
+; AVX512F-NEXT:    movq 32(%rdi), %r10
+; AVX512F-NEXT:    movq 40(%rdi), %rdi
+; AVX512F-NEXT:    jmp .LBB2_3
+; AVX512F-NEXT:  .LBB2_1:
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm0 = -1
+; AVX512F-NEXT:    vpxorq (%rsi), %zmm0, %zmm0
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT:    vpextrq $1, %xmm1, %rdi
+; AVX512F-NEXT:    vmovq %xmm1, %r10
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT:    vpextrq $1, %xmm1, %r11
+; AVX512F-NEXT:    vmovq %xmm1, %r9
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %r8
+; AVX512F-NEXT:    vmovq %xmm0, %rsi
+; AVX512F-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
+; AVX512F-NEXT:    vmovq %xmm0, %rdx
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT:  .LBB2_3:
+; AVX512F-NEXT:    movq %rdi, 40(%rcx)
+; AVX512F-NEXT:    movq %r10, 32(%rcx)
+; AVX512F-NEXT:    movq %r11, 24(%rcx)
+; AVX512F-NEXT:    movq %r9, 16(%rcx)
+; AVX512F-NEXT:    movq %r8, 8(%rcx)
+; AVX512F-NEXT:    movq %rsi, (%rcx)
+; AVX512F-NEXT:    movq %rdx, 48(%rcx)
+; AVX512F-NEXT:    movq %rax, 56(%rcx)
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_not_i512:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    testl %edx, %edx
+; AVX512VL-NEXT:    je .LBB2_1
+; AVX512VL-NEXT:  # %bb.2:
+; AVX512VL-NEXT:    movq 56(%rdi), %rax
+; AVX512VL-NEXT:    movq 48(%rdi), %rdx
+; AVX512VL-NEXT:    movq (%rdi), %rsi
+; AVX512VL-NEXT:    movq 8(%rdi), %r8
+; AVX512VL-NEXT:    movq 16(%rdi), %r11
+; AVX512VL-NEXT:    movq 24(%rdi), %r10
+; AVX512VL-NEXT:    movq 32(%rdi), %r9
+; AVX512VL-NEXT:    movq 40(%rdi), %rdi
+; AVX512VL-NEXT:    jmp .LBB2_3
+; AVX512VL-NEXT:  .LBB2_1:
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} zmm0 = -1
+; AVX512VL-NEXT:    vpxorq (%rsi), %zmm0, %zmm0
+; AVX512VL-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512VL-NEXT:    vpextrq $1, %xmm1, %rdi
+; AVX512VL-NEXT:    vmovq %xmm1, %r9
+; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512VL-NEXT:    vpextrq $1, %xmm1, %r10
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %r8
+; AVX512VL-NEXT:    vmovq %xmm1, %r11
+; AVX512VL-NEXT:    vmovq %xmm0, %rsi
+; AVX512VL-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
+; AVX512VL-NEXT:    vmovq %xmm0, %rdx
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT:  .LBB2_3:
+; AVX512VL-NEXT:    movq %rdi, 40(%rcx)
+; AVX512VL-NEXT:    movq %r9, 32(%rcx)
+; AVX512VL-NEXT:    movq %r10, 24(%rcx)
+; AVX512VL-NEXT:    movq %r11, 16(%rcx)
+; AVX512VL-NEXT:    movq %r8, 8(%rcx)
+; AVX512VL-NEXT:    movq %rsi, (%rcx)
+; AVX512VL-NEXT:    movq %rdx, 48(%rcx)
+; AVX512VL-NEXT:    movq %rax, 56(%rcx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+  %ld0 = load i512, ptr %p0
+  %ld1 = load i512, ptr %p1
+  %neg1 = xor i512 %ld1, -1
+  %sel = select i1 %a2, i512 %ld0, i512 %neg1
+  store i512 %sel, ptr %p3
+  ret void
+}
+
+;
+; ADD/SUB
+;
+
+define void @test_neg_i512(ptr %p0, ptr %p1, i1 zeroext %a2, ptr %p3) nounwind {
+; SSE-LABEL: test_neg_i512:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %r15
+; SSE-NEXT:    pushq %r14
+; SSE-NEXT:    pushq %rbx
+; SSE-NEXT:    xorl %r8d, %r8d
+; SSE-NEXT:    xorl %r10d, %r10d
+; SSE-NEXT:    subq (%rsi), %r10
+; SSE-NEXT:    movl $0, %eax
+; SSE-NEXT:    sbbq 8(%rsi), %rax
+; SSE-NEXT:    movl $0, %ebx
+; SSE-NEXT:    sbbq 16(%rsi), %rbx
+; SSE-NEXT:    movl $0, %r9d
+; SSE-NEXT:    sbbq 24(%rsi), %r9
+; SSE-NEXT:    movl $0, %r14d
+; SSE-NEXT:    sbbq 32(%rsi), %r14
+; SSE-NEXT:    movl $0, %r11d
+; SSE-NEXT:    sbbq 40(%rsi), %r11
+; SSE-NEXT:    movl $0, %r15d
+; SSE-NEXT:    sbbq 48(%rsi), %r15
+; SSE-NEXT:    sbbq 56(%rsi), %r8
+; SSE-NEXT:    testl %edx, %edx
+; SSE-NEXT:    je .LBB3_2
+; SSE-NEXT:  # %bb.1:
+; SSE-NEXT:    movq 8(%rdi), %rax
+; SSE-NEXT:    movq 24(%rdi), %r9
+; SSE-NEXT:    movq 40(%rdi), %r11
+; SSE-NEXT:    movq 56(%rdi), %r8
+; SSE-NEXT:    movq (%rdi), %r10
+; SSE-NEXT:    movq 16(%rdi), %rbx
+; SSE-NEXT:    movq 32(%rdi), %r14
+; SSE-NEXT:    movq 48(%rdi), %r15
+; SSE-NEXT:  .LBB3_2:
+; SSE-NEXT:    movq %r15, 48(%rcx)
+; SSE-NEXT:    movq %r14, 32(%rcx)
+; SSE-NEXT:    movq %rbx, 16(%rcx)
+; SSE-NEXT:    movq %r10, (%rcx)
+; SSE-NEXT:    movq %r8, 56(%rcx)
+; SSE-NEXT:    movq %r11, 40(%rcx)
+; SSE-NEXT:    movq %r9, 24(%rcx)
+; SSE-NEXT:    movq %rax, 8(%rcx)
+; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    popq %r15
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_neg_i512:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    xorl %r8d, %r8d
+; AVX-NEXT:    xorl %r10d, %r10d
+; AVX-NEXT:    subq (%rsi), %r10
+; AVX-NEXT:    movl $0, %eax
+; AVX-NEXT:    sbbq 8(%rsi), %rax
+; AVX-NEXT:    movl $0, %ebx
+; AVX-NEXT:    sbbq 16(%rsi), %rbx
+; AVX-NEXT:    movl $0, %r9d
+; AVX-NEXT:    sbbq 24(%rsi), %r9
+; AVX-NEXT:    movl $0, %r14d
+; AVX-NEXT:    sbbq 32(%rsi), %r14
+; AVX-NEXT:    movl $0, %r11d
+; AVX-NEXT:    sbbq 40(%rsi), %r11
+; AVX-NEXT:    movl $0, %r15d
+; AVX-NEXT:    sbbq 48(%rsi), %r15
+; AVX-NEXT:    sbbq 56(%rsi), %r8
+; AVX-NEXT:    testl %edx, %edx
+; AVX-NEXT:    je .LBB3_2
+; AVX-NEXT:  # %bb.1:
+; AVX-NEXT:    movq 8(%rdi), %rax
+; AVX-NEXT:    movq 24(%rdi), %r9
+; AVX-NEXT:    movq 40(%rdi), %r11
+; AVX-NEXT:    movq 56(%rdi), %r8
+; AVX-NEXT:    movq (%rdi), %r10
+; AVX-NEXT:    movq 16(%rdi), %rbx
+; AVX-NEXT:    movq 32(%rdi), %r14
+; AVX-NEXT:    movq 48(%rdi), %r15
+; AVX-NEXT:  .LBB3_2:
+; AVX-NEXT:    movq %r15, 48(%rcx)
+; AVX-NEXT:    movq %r14, 32(%rcx)
+; AVX-NEXT:    movq %rbx, 16(%rcx)
+; AVX-NEXT:    movq %r10, (%rcx)
+; AVX-NEXT:    movq %r8, 56(%rcx)
+; AVX-NEXT:    movq %r11, 40(%rcx)
+; AVX-NEXT:    movq %r9, 24(%rcx)
+; AVX-NEXT:    movq %rax, 8(%rcx)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: test_neg_i512:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    testl %edx, %edx
+; AVX512F-NEXT:    je .LBB3_1
+; AVX512F-NEXT:  # %bb.2:
+; AVX512F-NEXT:    movq 56(%rdi), %rax
+; AVX512F-NEXT:    movq 48(%rdi), %rdx
+; AVX512F-NEXT:    movq (%rdi), %rsi
+; AVX512F-NEXT:    movq 8(%rdi), %r8
+; AVX512F-NEXT:    movq 16(%rdi), %r9
+; AVX512F-NEXT:    movq 24(%rdi), %r11
+; AVX512F-NEXT:    movq 32(%rdi), %r10
+; AVX512F-NEXT:    movq 40(%rdi), %rdi
+; AVX512F-NEXT:    jmp .LBB3_3
+; AVX512F-NEXT:  .LBB3_1:
+; AVX512F-NEXT:    vmovdqu64 (%rsi), %zmm0
+; AVX512F-NEXT:    vptestmq %zmm0, %zmm0, %k0
+; AVX512F-NEXT:    kmovw %k0, %eax
+; AVX512F-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; AVX512F-NEXT:    kmovw %k0, %edx
+; AVX512F-NEXT:    movzbl %dl, %edx
+; AVX512F-NEXT:    leal (%rdx,%rax,2), %eax
+; AVX512F-NEXT:    xorl %edx, %eax
+; AVX512F-NEXT:    kmovw %eax, %k1
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
+; AVX512F-NEXT:    vpxorq %zmm2, %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm1, %xmm0
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %rdi
+; AVX512F-NEXT:    vmovq %xmm0, %r10
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %r11
+; AVX512F-NEXT:    vmovq %xmm0, %r9
+; AVX512F-NEXT:    vpextrq $1, %xmm1, %r8
+; AVX512F-NEXT:    vmovq %xmm1, %rsi
+; AVX512F-NEXT:    vextracti32x4 $3, %zmm1, %xmm0
+; AVX512F-NEXT:    vmovq %xmm0, %rdx
+; AVX512F-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT:  .LBB3_3:
+; AVX512F-NEXT:    movq %rdi, 40(%rcx)
+; AVX512F-NEXT:    movq %r10, 32(%rcx)
+; AVX512F-NEXT:    movq %r11, 24(%rcx)
+; AVX512F-NEXT:    movq %r9, 16(%rcx)
+; AVX512F-NEXT:    movq %r8, 8(%rcx)
+; AVX512F-NEXT:    movq %rsi, (%rcx)
+; AVX512F-NEXT:    movq %rdx, 48(%rcx)
+; AVX512F-NEXT:    movq %rax, 56(%rcx)
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: test_neg_i512:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    testl %edx, %edx
+; AVX512VL-NEXT:    je .LBB3_1
+; AVX512VL-NEXT:  # %bb.2:
+; AVX512VL-NEXT:    movq 56(%rdi), %rax
+; AVX512VL-NEXT:    movq 48(%rdi), %rdx
+; AVX512VL-NEXT:    movq (%rdi), %rsi
+; AVX512VL-NEXT:    movq 8(%rdi), %r8
+; AVX512VL-NEXT:    movq 16(%rdi), %r11
+; AVX512VL-NEXT:    movq 24(%rdi), %r10
+; AVX512VL-NEXT:    movq 32(%rdi), %r9
+; AVX512VL-NEXT:    movq 40(%rdi), %rdi
+; AVX512VL-NEXT:    jmp .LBB3_3
+; AVX512VL-NEXT:  .LBB3_1:
+; AVX512VL-NEXT:    vmovdqu64 (%rsi), %zmm0
+; AVX512VL-NEXT:    vptestmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT:    kmovd %k0, %eax
+; AVX512VL-NEXT:    vptestnmq %zmm0, %zmm0, %k0
+; AVX512VL-NEXT:    kmovb %k0, %edx
+; AVX512VL-NEXT:    leal (%rdx,%rax,2), %eax
+; AVX512VL-NEXT:    xorl %edx, %eax
+; AVX512VL-NEXT:    kmovd %eax, %k1
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsubq %zmm0, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpternlogd {{.*#+}} zmm2 = -1
+; AVX512VL-NEXT:    vpxorq %zmm2, %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vextracti32x4 $2, %zmm1, %xmm0
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rdi
+; AVX512VL-NEXT:    vmovq %xmm0, %r9
+; AVX512VL-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %r10
+; AVX512VL-NEXT:    vpextrq $1, %xmm1, %r8
+; AVX512VL-NEXT:    vmovq %xmm0, %r11
+; AVX512VL-NEXT:    vmovq %xmm1, %rsi
+; AVX512VL-NEXT:    vextracti32x4 $3, %zmm1, %xmm0
+; AVX512VL-NEXT:    vmovq %xmm0, %rdx
+; AVX512VL-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT:  .LBB3_3:
+; AVX512VL-NEXT:    movq %rdi, 40(%rcx)
+; AVX512VL-NEXT:    movq %r9, 32(%rcx)
+; AVX512VL-NEXT:    movq %r10, 24(%rcx)
+; AVX512VL-NEXT:    movq %r11, 16(%rcx)
+; AVX512VL-NEXT:    movq %r8, 8(%rcx)
+; AVX512VL-NEXT:    movq %rsi, (%rcx)
+; AVX512VL-NEXT:    movq %rdx, 48(%rcx)
+; AVX512VL-NEXT:    movq %rax, 56(%rcx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+  %ld0 = load i512, ptr %p0
+  %ld1 = load i512, ptr %p1
+  %neg1 = sub i512 0, %ld1
+  %sel = select i1 %a2, i512 %ld0, i512 %neg1
+  store i512 %sel, ptr %p3
+  ret void
+}
+
+
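The tests above all follow the same pattern: load a large integer, apply a
bitwise NOT (xor with -1) or a negation (sub from 0), and select between the
original and the modified value. As the CHECK lines show, the logic op is
already performed in vector registers, but the select is scalarised through
GPR pairs (one movq/pextrq per 64-bit chunk) on the way to the store. A
minimal sketch, not part of this commit and for illustration only, of the
vector form the i128 case corresponds to, assuming the i128 values are
reinterpreted as <2 x i64>:

  define void @sketch_not_v2i64(ptr %p0, ptr %p1, i1 zeroext %a2, ptr %p3) nounwind {
    ; Hypothetical illustration: keep the whole select in vector registers.
    %v0 = load <2 x i64>, ptr %p0
    %v1 = load <2 x i64>, ptr %p1
    ; NOT of the second operand, mirroring the "xor i128 %ld1, -1" above.
    %not1 = xor <2 x i64> %v1, <i64 -1, i64 -1>
    ; A whole-vector select avoids the per-qword extraction to GPRs.
    %sel = select i1 %a2, <2 x i64> %v0, <2 x i64> %not1
    store <2 x i64> %sel, ptr %p3
    ret void
  }

On x86-64 this form would be expected to stay in xmm registers on both sides
of the branch, which is the improvement the new tests are intended to track.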
More information about the llvm-commits mailing list