[llvm] r308136 - [X86] Add BMI2 scheduling tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jul 16 07:09:15 PDT 2017


Author: rksimon
Date: Sun Jul 16 07:09:15 2017
New Revision: 308136

URL: http://llvm.org/viewvc/llvm-project?rev=308136&view=rev
Log:
[X86] Add BMI2 scheduling tests
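
The test covers the BMI2 instructions BZHI, PDEP and PEXT in their 32-bit and
64-bit, register and memory-folded forms, and checks the per-instruction
scheduling comments emitted by -print-schedule. Each annotation reads as
[latency : reciprocal throughput]; the [?:0.000000e+00] entries in the ZNVER1
run suggest the znver1 model does not yet describe these instructions. A
minimal sketch of reproducing one of the RUN lines by hand (assuming a built
llc and FileCheck are on PATH and the command is run from the llvm source
root):

    llc < test/CodeGen/X86/bmi2-schedule.ll -mtriple=x86_64-unknown-unknown \
        -print-schedule -mcpu=haswell \
      | FileCheck test/CodeGen/X86/bmi2-schedule.ll \
        --check-prefix=CHECK --check-prefix=HASWELL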

Added:
    llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll

Added: llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll?rev=308136&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll Sun Jul 16 07:09:15 2017
@@ -0,0 +1,180 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mattr=+bmi2  | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=knl     | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1  | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
+define i32 @test_bzhi_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_bzhi_i32:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    bzhil %edi, (%rdx), %ecx
+; GENERIC-NEXT:    bzhil %edi, %esi, %eax
+; GENERIC-NEXT:    addl %ecx, %eax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_bzhi_i32:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    bzhil %edi, (%rdx), %ecx # sched: [4:0.50]
+; HASWELL-NEXT:    bzhil %edi, %esi, %eax # sched: [1:0.50]
+; HASWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_bzhi_i32:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    bzhil %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bzhil %edi, %esi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i32, i32 *%a2
+  %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
+  %3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
+  %4 = add i32 %2, %3
+  ret i32 %4
+}
+declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
+
+define i64 @test_bzhi_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_bzhi_i64:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    bzhiq %rdi, (%rdx), %rcx
+; GENERIC-NEXT:    bzhiq %rdi, %rsi, %rax
+; GENERIC-NEXT:    addq %rcx, %rax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_bzhi_i64:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    bzhiq %rdi, (%rdx), %rcx # sched: [4:0.50]
+; HASWELL-NEXT:    bzhiq %rdi, %rsi, %rax # sched: [1:0.50]
+; HASWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_bzhi_i64:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    bzhiq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bzhiq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i64, i64 *%a2
+  %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
+  %3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
+  %4 = add i64 %2, %3
+  ret i64 %4
+}
+declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
+
+define i32 @test_pdep_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_pdep_i32:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    pdepl (%rdx), %edi, %ecx
+; GENERIC-NEXT:    pdepl %esi, %edi, %eax
+; GENERIC-NEXT:    addl %ecx, %eax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_pdep_i32:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    pdepl (%rdx), %edi, %ecx # sched: [7:1.00]
+; HASWELL-NEXT:    pdepl %esi, %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pdep_i32:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    pdepl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pdepl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i32, i32 *%a2
+  %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
+  %3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
+  %4 = add i32 %2, %3
+  ret i32 %4
+}
+declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
+
+define i64 @test_pdep_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_pdep_i64:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    pdepq (%rdx), %rdi, %rcx
+; GENERIC-NEXT:    pdepq %rsi, %rdi, %rax
+; GENERIC-NEXT:    addq %rcx, %rax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_pdep_i64:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    pdepq (%rdx), %rdi, %rcx # sched: [7:1.00]
+; HASWELL-NEXT:    pdepq %rsi, %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pdep_i64:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    pdepq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pdepq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i64, i64 *%a2
+  %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
+  %3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
+  %4 = add i64 %2, %3
+  ret i64 %4
+}
+declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
+
+define i32 @test_pext_i32(i32 %a0, i32 %a1, i32 *%a2) {
+; GENERIC-LABEL: test_pext_i32:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    pextl (%rdx), %edi, %ecx
+; GENERIC-NEXT:    pextl %esi, %edi, %eax
+; GENERIC-NEXT:    addl %ecx, %eax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_pext_i32:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    pextl (%rdx), %edi, %ecx # sched: [7:1.00]
+; HASWELL-NEXT:    pextl %esi, %edi, %eax # sched: [3:1.00]
+; HASWELL-NEXT:    addl %ecx, %eax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pext_i32:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    pextl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pextl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i32, i32 *%a2
+  %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
+  %3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
+  %4 = add i32 %2, %3
+  ret i32 %4
+}
+declare i32 @llvm.x86.bmi.pext.32(i32, i32)
+
+define i64 @test_pext_i64(i64 %a0, i64 %a1, i64 *%a2) {
+; GENERIC-LABEL: test_pext_i64:
+; GENERIC:       # BB#0:
+; GENERIC-NEXT:    pextq (%rdx), %rdi, %rcx
+; GENERIC-NEXT:    pextq %rsi, %rdi, %rax
+; GENERIC-NEXT:    addq %rcx, %rax
+; GENERIC-NEXT:    retq
+;
+; HASWELL-LABEL: test_pext_i64:
+; HASWELL:       # BB#0:
+; HASWELL-NEXT:    pextq (%rdx), %rdi, %rcx # sched: [7:1.00]
+; HASWELL-NEXT:    pextq %rsi, %rdi, %rax # sched: [3:1.00]
+; HASWELL-NEXT:    addq %rcx, %rax # sched: [1:0.25]
+; HASWELL-NEXT:    retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pext_i64:
+; ZNVER1:       # BB#0:
+; ZNVER1-NEXT:    pextq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pextq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.50]
+; ZNVER1-NEXT:    retq # sched: [4:1.00]
+  %1 = load i64, i64 *%a2
+  %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
+  %3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
+  %4 = add i64 %2, %3
+  ret i64 %4
+}
+declare i64 @llvm.x86.bmi.pext.64(i64, i64)
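
As the NOTE line at the top of the file says, the CHECK blocks were
autogenerated rather than written by hand. A hedged sketch of regenerating
them after a scheduling-model change (assuming the freshly built llc is first
on PATH; the script also accepts an option to point at a specific llc binary):

    python utils/update_llc_test_checks.py test/CodeGen/X86/bmi2-schedule.ll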



