[llvm] r307024 - [x86] auto-generate complete checks for tests; NFC

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jul 3 07:29:45 PDT 2017


Author: spatel
Date: Mon Jul  3 07:29:45 2017
New Revision: 307024

URL: http://llvm.org/viewvc/llvm-project?rev=307024&view=rev
Log:
[x86] auto-generate complete checks for tests; NFC

These all used 'CHECK-NOT', which isn't necessary if we have complete checks.
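
For background: a 'CHECK-NOT' only forbids a match in the region between the
neighboring positive checks, so the old tests needed explicit negatives to rule
out stray 'lea'/'mov' instructions. With complete checks every instruction is
pinned down by 'CHECK-NEXT', so an unexpected instruction fails the test anyway.
A minimal illustration, taken from fn1 in shift-codegen.ll below:

  ; Old style: forbid an lea anywhere around the shift.
  ; CHECK:     shll $3
  ; CHECK-NOT: lea
  ; CHECK:     ret

  ; New style: match the exact instruction sequence line by line.
  ; CHECK:       # BB#0:
  ; CHECK-NEXT:    movl Y, %eax
  ; CHECK-NEXT:    shll $3, %eax
  ; CHECK-NEXT:    orl %eax, X
  ; CHECK-NEXT:    retl

The new checks come from the script noted at the top of each test; a typical
invocation is sketched below (assuming 'llc' is on PATH; flags may vary by
revision):

  utils/update_llc_test_checks.py test/CodeGen/X86/shift-codegen.ll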

Modified:
    llvm/trunk/test/CodeGen/X86/shift-codegen.ll
    llvm/trunk/test/CodeGen/X86/shift-folding.ll
    llvm/trunk/test/CodeGen/X86/swizzle-avx2.ll
    llvm/trunk/test/CodeGen/X86/tbm_patterns.ll

Modified: llvm/trunk/test/CodeGen/X86/shift-codegen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-codegen.ll?rev=307024&r1=307023&r2=307024&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-codegen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-codegen.ll Mon Jul  3 07:29:45 2017
@@ -1,38 +1,36 @@
-; RUN: llc < %s -relocation-model=static -march=x86 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -relocation-model=static -mtriple=i686-unknown-unknown | FileCheck %s
 
 ; This should produce two shll instructions, not any lea's.
 
 target triple = "i686-apple-darwin8"
-@Y = weak global i32 0          ; <i32*> [#uses=1]
-@X = weak global i32 0          ; <i32*> [#uses=2]
-
+@Y = weak global i32 0
+@X = weak global i32 0
 
 define void @fn1() {
 ; CHECK-LABEL: fn1:
-; CHECK-NOT: ret
-; CHECK-NOT: lea
-; CHECK: shll $3
-; CHECK-NOT: lea
-; CHECK: ret
-
-  %tmp = load i32, i32* @Y             ; <i32> [#uses=1]
-  %tmp1 = shl i32 %tmp, 3         ; <i32> [#uses=1]
-  %tmp2 = load i32, i32* @X            ; <i32> [#uses=1]
-  %tmp3 = or i32 %tmp1, %tmp2             ; <i32> [#uses=1]
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl Y, %eax
+; CHECK-NEXT:    shll $3, %eax
+; CHECK-NEXT:    orl %eax, X
+; CHECK-NEXT:    retl
+  %tmp = load i32, i32* @Y
+  %tmp1 = shl i32 %tmp, 3
+  %tmp2 = load i32, i32* @X
+  %tmp3 = or i32 %tmp1, %tmp2
   store i32 %tmp3, i32* @X
   ret void
 }
 
 define i32 @fn2(i32 %X, i32 %Y) {
 ; CHECK-LABEL: fn2:
-; CHECK-NOT: ret
-; CHECK-NOT: lea
-; CHECK: shll $3
-; CHECK-NOT: lea
-; CHECK: ret
-
-  %tmp2 = shl i32 %Y, 3           ; <i32> [#uses=1]
-  %tmp4 = or i32 %tmp2, %X                ; <i32> [#uses=1]
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    shll $3, %eax
+; CHECK-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    retl
+  %tmp2 = shl i32 %Y, 3
+  %tmp4 = or i32 %tmp2, %X
   ret i32 %tmp4
 }
 

Modified: llvm/trunk/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shift-folding.ll?rev=307024&r1=307023&r2=307024&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shift-folding.ll Mon Jul  3 07:29:45 2017
@@ -1,12 +1,13 @@
-; RUN: llc < %s -march=x86 -verify-coalescing | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-coalescing | FileCheck %s
 
 define i32* @test1(i32* %P, i32 %X) {
 ; CHECK-LABEL: test1:
-; CHECK-NOT: shrl
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    andl $-4, %eax
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    retl
   %Y = lshr i32 %X, 2
   %gep.upgrd.1 = zext i32 %Y to i64
   %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.1
@@ -15,11 +16,11 @@ entry:
 
 define i32* @test2(i32* %P, i32 %X) {
 ; CHECK-LABEL: test2:
-; CHECK: shll $4
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    shll $4, %eax
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    retl
   %Y = shl i32 %X, 2
   %gep.upgrd.2 = zext i32 %Y to i64
   %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.2
@@ -28,11 +29,11 @@ entry:
 
 define i32* @test3(i32* %P, i32 %X) {
 ; CHECK-LABEL: test3:
-; CHECK-NOT: shrl
-; CHECK-NOT: shll
-; CHECK: ret
-
-entry:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    andl $-4, %eax
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    retl
   %Y = ashr i32 %X, 2
   %P2 = getelementptr i32, i32* %P, i32 %Y
   ret i32* %P2
@@ -40,25 +41,27 @@ entry:
 
 define fastcc i32 @test4(i32* %d) {
 ; CHECK-LABEL: test4:
-; CHECK-NOT: shrl
-; CHECK: ret
-
-entry:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movzbl 3(%ecx), %eax
+; CHECK-NEXT:    retl
   %tmp4 = load i32, i32* %d
   %tmp512 = lshr i32 %tmp4, 24
   ret i32 %tmp512
 }
 
-define i64 @test5(i16 %i, i32* %arr) {
 ; Ensure that we don't fold away shifts which have multiple uses, as they are
 ; just re-introduced for the second use.
-; CHECK-LABEL: test5:
-; CHECK-NOT: shrl
-; CHECK: shrl $11
-; CHECK-NOT: shrl
-; CHECK: ret
 
-entry:
+define i64 @test5(i16 %i, i32* %arr) {
+; CHECK-LABEL: test5:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    shrl $11, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    addl (%ecx,%eax,4), %eax
+; CHECK-NEXT:    setb %dl
+; CHECK-NEXT:    retl
   %i.zext = zext i16 %i to i32
   %index = lshr i32 %i.zext, 11
   %index.zext = zext i32 %index to i64

Modified: llvm/trunk/test/CodeGen/X86/swizzle-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/swizzle-avx2.ll?rev=307024&r1=307023&r2=307024&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/swizzle-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/swizzle-avx2.ll Mon Jul  3 07:29:45 2017
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=avx2 | FileCheck %s
 
 ; Test that we correctly fold a shuffle that performs a swizzle of another
 ; shuffle node according to the rule
@@ -11,81 +12,77 @@
 ; Check that we produce a single vector permute / shuffle in all cases.
 
 define <8 x i32> @swizzle_1(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,3,2,0,4,5,6,7]
+; CHECK-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 3, i32 1, i32 2, i32 0, i32 7, i32 5, i32 6, i32 4>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 7, i32 5, i32 6, i32 4>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_1
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_2(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 6, i32 7, i32 4, i32 5, i32 0, i32 1, i32 2, i32 3>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_2
-; CHECK: vpshufd $78
-; CHECK-NOT: vpermd
-; CHECK-NOT: vpshufd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_3(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 2, i32 3, i32 0, i32 1>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_3
-; CHECK: vpshufd $78
-; CHECK-NOT: vpermd
-; CHECK-NOT: vpshufd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_4(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_4:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,1,2,0,6,5,4,7]
+; CHECK-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 4, i32 7, i32 5, i32 6, i32 3, i32 2, i32 0, i32 1>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_4
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_5(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_5:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,0,1,2,7,6,4,5]
+; CHECK-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 7, i32 4, i32 6, i32 5, i32 0, i32 2, i32 1, i32 3>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_5
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_6(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_6:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,1,0,2,4,5,6,7]
+; CHECK-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 2, i32 1, i32 3, i32 0, i32 4, i32 7, i32 6, i32 5>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_6
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
 
 define <8 x i32> @swizzle_7(<8 x i32> %v) {
+; CHECK-LABEL: swizzle_7:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,3,1,4,5,6,7]
+; CHECK-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
   %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 5, i32 4, i32 6, i32 7>
   ret <8 x i32> %2
 }
-; CHECK-LABEL: swizzle_7
-; CHECK: vpermd
-; CHECK-NOT: vpermd
-; CHECK: ret
-
 

Modified: llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm_patterns.ll?rev=307024&r1=307023&r2=307024&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tbm_patterns.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tbm_patterns.ll Mon Jul  3 07:29:45 2017
@@ -1,253 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+tbm < %s | FileCheck %s
 
-define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_bextri_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: bextr $
-  %0 = lshr i32 %a, 4
-  %1 = and i32 %0, 4095
-  ret i32 %1
-}
-
-define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind readonly {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
-  ; CHECK-NOT: mov
-  ; CHECK: bextr $
-  %0 = load i32, i32* %a
-  %1 = lshr i32 %0, 4
-  %2 = and i32 %1, 4095
-  ret i32 %2
-}
-
-define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_bextri_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: bextr $
-  %0 = lshr i64 %a, 4
-  %1 = and i64 %0, 4095
-  ret i64 %1
-}
-
-define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind readonly {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
-  ; CHECK-NOT: mov
-  ; CHECK: bextr $
-  %0 = load i64, i64* %a
-  %1 = lshr i64 %0, 4
-  %2 = and i64 %1, 4095
-  ret i64 %2
-}
-
-define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcfill_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blcfill %
-  %0 = add i32 %a, 1
-  %1 = and i32 %0, %a
-  ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcfill_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blcfill %
-  %0 = add i64 %a, 1
-  %1 = and i64 %0, %a
-  ret i64 %1
-}
-
-define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blci_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blci %
-  %0 = add i32 1, %a
-  %1 = xor i32 %0, -1
-  %2 = or i32 %1, %a
-  ret i32 %2
-}
-
-define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blci_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blci %
-  %0 = add i64 1, %a
-  %1 = xor i64 %0, -1
-  %2 = or i64 %1, %a
-  ret i64 %2
-}
-
-define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blci_u32_b:
-  ; CHECK-NOT: mov
-  ; CHECK: blci %
-  %0 = sub i32 -2, %a
-  %1 = or i32 %0, %a
-  ret i32 %1
-}
-
-define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blci_u64_b:
-  ; CHECK-NOT: mov
-  ; CHECK: blci %
-  %0 = sub i64 -2, %a
-  %1 = or i64 %0, %a
-  ret i64 %1
-}
-
-define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcic_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blcic %
-  %0 = xor i32 %a, -1
-  %1 = add i32 %a, 1
-  %2 = and i32 %1, %0
-  ret i32 %2
-}
-
-define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcic_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blcic %
-  %0 = xor i64 %a, -1
-  %1 = add i64 %a, 1
-  %2 = and i64 %1, %0
-  ret i64 %2
-}
-
-define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blcmsk %
-  %0 = add i32 %a, 1
-  %1 = xor i32 %0, %a
-  ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blcmsk %
-  %0 = add i64 %a, 1
-  %1 = xor i64 %0, %a
-  ret i64 %1
-}
-
-define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcs_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blcs %
-  %0 = add i32 %a, 1
-  %1 = or i32 %0, %a
-  ret i32 %1
-}
-
-define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blcs_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blcs %
-  %0 = add i64 %a, 1
-  %1 = or i64 %0, %a
-  ret i64 %1
-}
-
-define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blsfill_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blsfill %
-  %0 = add i32 %a, -1
-  %1 = or i32 %0, %a
-  ret i32 %1
-}
-
-define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blsfill_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blsfill %
-  %0 = add i64 %a, -1
-  %1 = or i64 %0, %a
-  ret i64 %1
-}
-
-define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blsic_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: blsic %
-  %0 = xor i32 %a, -1
-  %1 = add i32 %a, -1
-  %2 = or i32 %0, %1
-  ret i32 %2
-}
-
-define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_blsic_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: blsic %
-  %0 = xor i64 %a, -1
-  %1 = add i64 %a, -1
-  %2 = or i64 %0, %1
-  ret i64 %2
-}
-
-define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: t1mskc %
-  %0 = xor i32 %a, -1
-  %1 = add i32 %a, 1
-  %2 = or i32 %0, %1
-  ret i32 %2
-}
-
-define i64 @Ttest_x86_tbm_t1mskc_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: t1mskc %
-  %0 = xor i64 %a, -1
-  %1 = add i64 %a, 1
-  %2 = or i64 %0, %1
-  ret i64 %2
-}
-
-define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
-  ; CHECK-NOT: mov
-  ; CHECK: tzmsk %
-  %0 = xor i32 %a, -1
-  %1 = add i32 %a, -1
-  %2 = and i32 %0, %1
-  ret i32 %2
-}
-
-define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind readnone {
-entry:
-  ; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
-  ; CHECK-NOT: mov
-  ; CHECK: tzmsk %
-  %0 = xor i64 %a, -1
-  %1 = add i64 %a, -1
-  %2 = and i64 %0, %1
-  ret i64 %2
+define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    bextr $3076, %edi, %eax # imm = 0xC04
+; CHECK-NEXT:    retq
+  %t0 = lshr i32 %a, 4
+  %t1 = and i32 %t0, 4095
+  ret i32 %t1
+}
+
+define i32 @test_x86_tbm_bextri_u32_m(i32* nocapture %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    bextr $3076, (%rdi), %eax # imm = 0xC04
+; CHECK-NEXT:    retq
+  %t0 = load i32, i32* %a
+  %t1 = lshr i32 %t0, 4
+  %t2 = and i32 %t1, 4095
+  ret i32 %t2
+}
+
+define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    bextr $3076, %edi, %eax # imm = 0xC04
+; CHECK-NEXT:    retq
+  %t0 = lshr i64 %a, 4
+  %t1 = and i64 %t0, 4095
+  ret i64 %t1
+}
+
+define i64 @test_x86_tbm_bextri_u64_m(i64* nocapture %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    bextr $3076, (%rdi), %eax # imm = 0xC04
+; CHECK-NEXT:    retq
+  %t0 = load i64, i64* %a
+  %t1 = lshr i64 %t0, 4
+  %t2 = and i64 %t1, 4095
+  ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcfill %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = and i32 %t0, %a
+  ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcfill %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = and i64 %t0, %a
+  ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blci %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 1, %a
+  %t1 = xor i32 %t0, -1
+  %t2 = or i32 %t1, %a
+  ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blci %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 1, %a
+  %t1 = xor i64 %t0, -1
+  %t2 = or i64 %t1, %a
+  ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u32_b:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blci %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = sub i32 -2, %a
+  %t1 = or i32 %t0, %a
+  ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blci_u64_b(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u64_b:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blci %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = sub i64 -2, %a
+  %t1 = or i64 %t0, %a
+  ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blcic_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcic %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, 1
+  %t2 = and i32 %t1, %t0
+  ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcic %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, 1
+  %t2 = and i64 %t1, %t0
+  ret i64 %t2
+}
+
+define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcmsk %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = xor i32 %t0, %a
+  ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcmsk %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = xor i64 %t0, %a
+  ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcs %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = or i32 %t0, %a
+  ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blcs %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = or i64 %t0, %a
+  ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blsfill %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, -1
+  %t1 = or i32 %t0, %a
+  ret i32 %t1
+}
+
+define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blsfill %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, -1
+  %t1 = or i64 %t0, %a
+  ret i64 %t1
+}
+
+define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blsic %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, -1
+  %t2 = or i32 %t0, %t1
+  ret i32 %t2
+}
+
+define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    blsic %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, -1
+  %t2 = or i64 %t0, %t1
+  ret i64 %t2
+}
+
+define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    t1mskc %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, 1
+  %t2 = or i32 %t0, %t1
+  ret i32 %t2
+}
+
+define i64 @Ttest_x86_tbm_t1mskc_u64(i64 %a) nounwind {
+; CHECK-LABEL: Ttest_x86_tbm_t1mskc_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    t1mskc %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, 1
+  %t2 = or i64 %t0, %t1
+  ret i64 %t2
+}
+
+define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    tzmsk %edi, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, -1
+  %t2 = and i32 %t0, %t1
+  ret i32 %t2
+}
+
+define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    tzmsk %rdi, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, -1
+  %t2 = and i64 %t0, %t1
+  ret i64 %t2
 }
+



