[llvm] r273087 - [X86][TBM] Added fast-isel tests matching tools/clang/test/CodeGen/tbm-builtins.c
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Jun 18 10:20:53 PDT 2016

Author: rksimon
Date: Sat Jun 18 12:20:52 2016
New Revision: 273087

URL: http://llvm.org/viewvc/llvm-project?rev=273087&view=rev
Log:
[X86][TBM] Added fast-isel tests matching tools/clang/test/CodeGen/tbm-builtins.c

Added:
    llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
    llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll

Added: llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll?rev=273087&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll (added)
+++ llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll Sat Jun 18 12:20:52 2016
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/tbm-builtins.c
+
+define i64 @test__bextri_u64(i64 %a0) {
+; X64-LABEL: test__bextri_u64:
+; X64: # BB#0:
+; X64-NEXT: bextr $1, %rdi, %rax
+; X64-NEXT: retq
+ %1 = call i64 @llvm.x86.tbm.bextri.u64(i64 %a0, i64 1)
+ ret i64 %1
+}
+
+define i64 @test__blcfill_u64(i64 %a0) {
+; X64-LABEL: test__blcfill_u64:
+; X64: # BB#0:
+; X64-NEXT: leaq 1(%rdi), %rax
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: retq
+ %1 = add i64 %a0, 1
+ %2 = and i64 %a0, %1
+ ret i64 %2
+}
+
+define i64 @test__blci_u64(i64 %a0) {
+; X64-LABEL: test__blci_u64:
+; X64: # BB#0:
+; X64-NEXT: leaq 1(%rdi), %rax
+; X64-NEXT: xorq $-1, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %1 = add i64 %a0, 1
+ %2 = xor i64 %1, -1
+ %3 = or i64 %a0, %2
+ ret i64 %3
+}
+
+define i64 @test__blcic_u64(i64 %a0) {
+; X64-LABEL: test__blcic_u64:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: xorq $-1, %rax
+; X64-NEXT: addq $1, %rdi
+; X64-NEXT: andq %rax, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %1 = xor i64 %a0, -1
+ %2 = add i64 %a0, 1
+ %3 = and i64 %1, %2
+ ret i64 %3
+}
+
+define i64 @test__blcmsk_u64(i64 %a0) {
+; X64-LABEL: test__blcmsk_u64:
+; X64: # BB#0:
+; X64-NEXT: leaq 1(%rdi), %rax
+; X64-NEXT: xorq %rdi, %rax
+; X64-NEXT: retq
+ %1 = add i64 %a0, 1
+ %2 = xor i64 %a0, %1
+ ret i64 %2
+}
+
+define i64 @test__blcs_u64(i64 %a0) {
+; X64-LABEL: test__blcs_u64:
+; X64: # BB#0:
+; X64-NEXT: leaq 1(%rdi), %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %1 = add i64 %a0, 1
+ %2 = or i64 %a0, %1
+ ret i64 %2
+}
+
+define i64 @test__blsfill_u64(i64 %a0) {
+; X64-LABEL: test__blsfill_u64:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: subq $1, %rax
+; X64-NEXT: orq %rdi, %rax
+; X64-NEXT: retq
+ %1 = sub i64 %a0, 1
+ %2 = or i64 %a0, %1
+ ret i64 %2
+}
+
+define i64 @test__blsic_u64(i64 %a0) {
+; X64-LABEL: test__blsic_u64:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: xorq $-1, %rax
+; X64-NEXT: subq $1, %rdi
+; X64-NEXT: orq %rax, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %1 = xor i64 %a0, -1
+ %2 = sub i64 %a0, 1
+ %3 = or i64 %1, %2
+ ret i64 %3
+}
+
+define i64 @test__t1mskc_u64(i64 %a0) {
+; X64-LABEL: test__t1mskc_u64:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: xorq $-1, %rax
+; X64-NEXT: addq $1, %rdi
+; X64-NEXT: orq %rax, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %1 = xor i64 %a0, -1
+ %2 = add i64 %a0, 1
+ %3 = or i64 %1, %2
+ ret i64 %3
+}
+
+define i64 @test__tzmsk_u64(i64 %a0) {
+; X64-LABEL: test__tzmsk_u64:
+; X64: # BB#0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: xorq $-1, %rax
+; X64-NEXT: subq $1, %rdi
+; X64-NEXT: andq %rax, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+ %1 = xor i64 %a0, -1
+ %2 = sub i64 %a0, 1
+ %3 = and i64 %1, %2
+ ret i64 %3
+}
+
+declare i64 @llvm.x86.tbm.bextri.u64(i64, i64)
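
For readers cross-checking the IR patterns above against the TBM instruction semantics, here is a minimal C sketch of the same bit-manipulation identities. The helper names are illustrative stand-ins, not clang's actual builtins; the expressions, though, mirror the add/sub/and/or/xor sequences in the tests, which is how the __blcfill_u64-style builtins lower when no target intrinsic is involved:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable expansions of the TBM 64-bit operations exercised above.
       Each expression matches the IR pattern in the corresponding test. */
    static uint64_t blcfill64(uint64_t x) { return x & (x + 1); }  /* BLCFILL: fill from lowest clear bit       */
    static uint64_t blci64(uint64_t x)    { return x | ~(x + 1); } /* BLCI: isolate lowest clear bit            */
    static uint64_t blcic64(uint64_t x)   { return ~x & (x + 1); } /* BLCIC: isolate lowest clear bit, compl.   */
    static uint64_t blcmsk64(uint64_t x)  { return x ^ (x + 1); }  /* BLCMSK: mask from lowest clear bit        */
    static uint64_t blcs64(uint64_t x)    { return x | (x + 1); }  /* BLCS: set lowest clear bit                */
    static uint64_t blsfill64(uint64_t x) { return x | (x - 1); }  /* BLSFILL: fill from lowest set bit         */
    static uint64_t blsic64(uint64_t x)   { return ~x | (x - 1); } /* BLSIC: isolate lowest set bit, compl.     */
    static uint64_t t1mskc64(uint64_t x)  { return ~x | (x + 1); } /* T1MSKC: inverse mask from trailing ones   */
    static uint64_t tzmsk64(uint64_t x)   { return ~x & (x - 1); } /* TZMSK: mask from trailing zeros           */

    int main(void) {
        uint64_t a = 0xF0;
        printf("tzmsk(0x%llx)   = 0x%llx\n", (unsigned long long)a,
               (unsigned long long)tzmsk64(a));   /* prints 0xf  */
        printf("blsfill(0x%llx) = 0x%llx\n", (unsigned long long)a,
               (unsigned long long)blsfill64(a)); /* prints 0xff */
        return 0;
    }
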
Added: llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll?rev=273087&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll (added)
+++ llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll Sat Jun 18 12:20:52 2016
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/tbm-builtins.c
+
+define i32 @test__bextri_u32(i32 %a0) {
+; X32-LABEL: test__bextri_u32:
+; X32: # BB#0:
+; X32-NEXT: bextr $1, {{[0-9]+}}(%esp), %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__bextri_u32:
+; X64: # BB#0:
+; X64-NEXT: bextr $1, %edi, %eax
+; X64-NEXT: retq
+ %1 = call i32 @llvm.x86.tbm.bextri.u32(i32 %a0, i32 1)
+ ret i32 %1
+}
+
+define i32 @test__blcfill_u32(i32 %a0) {
+; X32-LABEL: test__blcfill_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: leal 1(%ecx), %eax
+; X32-NEXT: andl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blcfill_u32:
+; X64: # BB#0:
+; X64-NEXT: leal 1(%rdi), %eax
+; X64-NEXT: andl %edi, %eax
+; X64-NEXT: retq
+ %1 = add i32 %a0, 1
+ %2 = and i32 %a0, %1
+ ret i32 %2
+}
+
+define i32 @test__blci_u32(i32 %a0) {
+; X32-LABEL: test__blci_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: leal 1(%ecx), %eax
+; X32-NEXT: xorl $-1, %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blci_u32:
+; X64: # BB#0:
+; X64-NEXT: leal 1(%rdi), %eax
+; X64-NEXT: xorl $-1, %eax
+; X64-NEXT: orl %edi, %eax
+; X64-NEXT: retq
+ %1 = add i32 %a0, 1
+ %2 = xor i32 %1, -1
+ %3 = or i32 %a0, %2
+ ret i32 %3
+}
+
+define i32 @test__blcic_u32(i32 %a0) {
+; X32-LABEL: test__blcic_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: xorl $-1, %ecx
+; X32-NEXT: addl $1, %eax
+; X32-NEXT: andl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blcic_u32:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-1, %eax
+; X64-NEXT: addl $1, %edi
+; X64-NEXT: andl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %1 = xor i32 %a0, -1
+ %2 = add i32 %a0, 1
+ %3 = and i32 %1, %2
+ ret i32 %3
+}
+
+define i32 @test__blcmsk_u32(i32 %a0) {
+; X32-LABEL: test__blcmsk_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: leal 1(%ecx), %eax
+; X32-NEXT: xorl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blcmsk_u32:
+; X64: # BB#0:
+; X64-NEXT: leal 1(%rdi), %eax
+; X64-NEXT: xorl %edi, %eax
+; X64-NEXT: retq
+ %1 = add i32 %a0, 1
+ %2 = xor i32 %a0, %1
+ ret i32 %2
+}
+
+define i32 @test__blcs_u32(i32 %a0) {
+; X32-LABEL: test__blcs_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: leal 1(%ecx), %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blcs_u32:
+; X64: # BB#0:
+; X64-NEXT: leal 1(%rdi), %eax
+; X64-NEXT: orl %edi, %eax
+; X64-NEXT: retq
+ %1 = add i32 %a0, 1
+ %2 = or i32 %a0, %1
+ ret i32 %2
+}
+
+define i32 @test__blsfill_u32(i32 %a0) {
+; X32-LABEL: test__blsfill_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: subl $1, %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blsfill_u32:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: subl $1, %eax
+; X64-NEXT: orl %edi, %eax
+; X64-NEXT: retq
+ %1 = sub i32 %a0, 1
+ %2 = or i32 %a0, %1
+ ret i32 %2
+}
+
+define i32 @test__blsic_u32(i32 %a0) {
+; X32-LABEL: test__blsic_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: xorl $-1, %ecx
+; X32-NEXT: subl $1, %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__blsic_u32:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-1, %eax
+; X64-NEXT: subl $1, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %1 = xor i32 %a0, -1
+ %2 = sub i32 %a0, 1
+ %3 = or i32 %1, %2
+ ret i32 %3
+}
+
+define i32 @test__t1mskc_u32(i32 %a0) {
+; X32-LABEL: test__t1mskc_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: xorl $-1, %ecx
+; X32-NEXT: addl $1, %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__t1mskc_u32:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-1, %eax
+; X64-NEXT: addl $1, %edi
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %1 = xor i32 %a0, -1
+ %2 = add i32 %a0, 1
+ %3 = or i32 %1, %2
+ ret i32 %3
+}
+
+define i32 @test__tzmsk_u32(i32 %a0) {
+; X32-LABEL: test__tzmsk_u32:
+; X32: # BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: xorl $-1, %ecx
+; X32-NEXT: subl $1, %eax
+; X32-NEXT: andl %ecx, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test__tzmsk_u32:
+; X64: # BB#0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl $-1, %eax
+; X64-NEXT: subl $1, %edi
+; X64-NEXT: andl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+ %1 = xor i32 %a0, -1
+ %2 = sub i32 %a0, 1
+ %3 = and i32 %1, %2
+ ret i32 %3
+}
+
+declare i32 @llvm.x86.tbm.bextri.u32(i32, i32)
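
One note on the test__bextri_u32/u64 tests: unlike the other TBM operations, BEXTRI keeps a real intrinsic because its immediate is a 16-bit control word, with bits [7:0] selecting the starting bit position and bits [15:8] the field length. A rough C model of the extraction, as an illustration rather than a spec-exact implementation (the function name is made up):

    #include <stdint.h>

    /* Simplified model of 32-bit BEXTRI: extract 'len' bits of 'src'
       starting at bit 'start'. Shifts are guarded because shifting a
       C integer by >= its width is undefined behavior. */
    static uint32_t bextri32(uint32_t src, uint32_t ctl) {
        uint32_t start = ctl & 0xFF;         /* control bits [7:0]  */
        uint32_t len   = (ctl >> 8) & 0xFF;  /* control bits [15:8] */
        if (start >= 32 || len == 0)
            return 0;
        uint32_t field = src >> start;
        return (len >= 32) ? field : (field & ((1u << len) - 1u));
    }

With the control value 1 used in these tests (start = 1, length = 0) the extracted field is empty; the tests only check that the intrinsic selects to the bextr instruction, not the value it computes.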