[llvm] e839d2a - [X86] andnot-patterns.ll - tweak #112425 test patterns to use separate source values for ANDNOT operands

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Wed Oct 16 06:33:12 PDT 2024


Author: Simon Pilgrim
Date: 2024-10-16T14:32:51+01:00
New Revision: e839d2a60ac3149f09b3cda0816cf5074075733c

URL: https://github.com/llvm/llvm-project/commit/e839d2a60ac3149f09b3cda0816cf5074075733c
DIFF: https://github.com/llvm/llvm-project/commit/e839d2a60ac3149f09b3cda0816cf5074075733c.diff

LOG: [X86] andnot-patterns.ll - tweak #112425 test patterns to use separate source values for ANDNOT operands
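
For reference, a minimal sketch of the change for one of the tweaked tests, drawn directly from the diff below (the function, argument, and intrinsic names are the ones already in andnot-patterns.ll). Previously the value being inverted and the other AND operand were the same argument, so the two ANDNOT operands aliased; the tweak passes them as separate arguments:

    ; Before: %a0 feeds both the NOT and the AND, %a1 is the rotate amount.
    define i64 @andnot_rotl_i64(i64 %a0, i64 %a1) nounwind {
      %not = xor i64 %a0, -1
      %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a1)
      %and = and i64 %rot, %a0
      ret i64 %and
    }

    ; After: %a1 feeds the NOT, %a0 is the other AND operand, %a2 is the rotate amount.
    define i64 @andnot_rotl_i64(i64 %a0, i64 %a1, i64 %a2) nounwind {
      %not = xor i64 %a1, -1
      %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a2)
      %and = and i64 %rot, %a0
      ret i64 %and
    }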

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/andnot-patterns.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/andnot-patterns.ll b/llvm/test/CodeGen/X86/andnot-patterns.ll
index 0ff4e3b47ae46a..46ebe6ba76567a 100644
--- a/llvm/test/CodeGen/X86/andnot-patterns.ll
+++ b/llvm/test/CodeGen/X86/andnot-patterns.ll
@@ -10,18 +10,14 @@ declare void @use_i64(i64)
 ; Fold (and X, (rotl (not Y), Z))) -> (and X, (not (rotl Y, Z)))
 ;
 
-define i64 @andnot_rotl_i64(i64 %a0, i64 %a1) nounwind {
+define i64 @andnot_rotl_i64(i64 %a0, i64 %a1, i64 %a2) nounwind {
 ; X86-LABEL: andnot_rotl_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebx
-; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    notl %ebx
-; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    notl %esi
 ; X86-NEXT:    notl %edx
 ; X86-NEXT:    testb $32, %cl
 ; X86-NEXT:    jne .LBB0_1
@@ -29,116 +25,112 @@ define i64 @andnot_rotl_i64(i64 %a0, i64 %a1) nounwind {
 ; X86-NEXT:    movl %edx, %eax
 ; X86-NEXT:    jmp .LBB0_3
 ; X86-NEXT:  .LBB0_1:
-; X86-NEXT:    movl %ebx, %eax
-; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:  .LBB0_3:
-; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    movl %esi, %edx
 ; X86-NEXT:    shldl %cl, %eax, %edx
 ; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-NEXT:    shldl %cl, %ebx, %eax
-; X86-NEXT:    andl %edi, %eax
-; X86-NEXT:    andl %esi, %edx
+; X86-NEXT:    shldl %cl, %esi, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
-; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotl_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rcx
-; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    movq %rsi, %rax
 ; X64-NEXT:    notq %rax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
 ; X64-NEXT:    rolq %cl, %rax
 ; X64-NEXT:    andq %rdi, %rax
 ; X64-NEXT:    retq
-  %not = xor i64 %a0, -1
-  %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a1)
+  %not = xor i64 %a1, -1
+  %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a2)
   %and = and i64 %rot, %a0
   ret i64 %and
 }
 
-define i32 @andnot_rotl_i32(i32 %a0, i32 %a1) nounwind {
+define i32 @andnot_rotl_i32(i32 %a0, i32 %a1, i32 %a2) nounwind {
 ; X86-LABEL: andnot_rotl_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    roll %cl, %eax
-; X86-NEXT:    andl %edx, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotl_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    roll %cl, %eax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    retq
-  %not = xor i32 %a0, -1
-  %rot = tail call i32 @llvm.fshl.i32(i32 %not, i32 %not, i32 %a1)
+  %not = xor i32 %a1, -1
+  %rot = tail call i32 @llvm.fshl.i32(i32 %not, i32 %not, i32 %a2)
   %and = and i32 %rot, %a0
   ret i32 %and
 }
 
-define i16 @andnot_rotl_i16(i16 %a0, i16 %a1) nounwind {
+define i16 @andnot_rotl_i16(i16 %a0, i16 %a1, i16 %a2) nounwind {
 ; X86-LABEL: andnot_rotl_i16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    rolw %cl, %ax
-; X86-NEXT:    andl %edx, %eax
+; X86-NEXT:    andw {{[0-9]+}}(%esp), %ax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotl_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    rolw %cl, %ax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-  %not = xor i16 %a0, -1
-  %rot = tail call i16 @llvm.fshl.i16(i16 %not, i16 %not, i16 %a1)
+  %not = xor i16 %a1, -1
+  %rot = tail call i16 @llvm.fshl.i16(i16 %not, i16 %not, i16 %a2)
   %and = and i16 %rot, %a0
   ret i16 %and
 }
 
-define i8 @andnot_rotl_i8(i8 %a0, i8 %a1) nounwind {
+define i8 @andnot_rotl_i8(i8 %a0, i8 %a1, i8 %a2) nounwind {
 ; X86-LABEL: andnot_rotl_i8:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notb %al
 ; X86-NEXT:    rolb %cl, %al
-; X86-NEXT:    andb %dl, %al
+; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotl_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notb %al
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    rolb %cl, %al
 ; X64-NEXT:    andb %dil, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
-  %not = xor i8 %a0, -1
-  %rot = tail call i8 @llvm.fshl.i8(i8 %not, i8 %not, i8 %a1)
+  %not = xor i8 %a1, -1
+  %rot = tail call i8 @llvm.fshl.i8(i8 %not, i8 %not, i8 %a2)
   %and = and i8 %rot, %a0
   ret i8 %and
 }
 
-define i64 @andnot_rotl_i64_multiuse(i64 %a0, i64 %a1) nounwind {
+define i64 @andnot_rotl_i64_multiuse(i64 %a0, i64 %a1, i64 %a2) nounwind {
 ; X86-LABEL: andnot_rotl_i64_multiuse:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %ebx
@@ -146,28 +138,28 @@ define i64 @andnot_rotl_i64_multiuse(i64 %a0, i64 %a1) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl %edi, %eax
-; X86-NEXT:    notl %eax
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    notl %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    notl %edx
+; X86-NEXT:    notl %esi
 ; X86-NEXT:    testb $32, %cl
 ; X86-NEXT:    jne .LBB4_1
 ; X86-NEXT:  # %bb.2:
-; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    jmp .LBB4_3
 ; X86-NEXT:  .LBB4_1:
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl %esi, %edx
 ; X86-NEXT:  .LBB4_3:
-; X86-NEXT:    movl %eax, %ebx
-; X86-NEXT:    shldl %cl, %edx, %ebx
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    shldl %cl, %eax, %ebx
 ; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-NEXT:    shldl %cl, %eax, %edx
-; X86-NEXT:    andl %edx, %esi
+; X86-NEXT:    shldl %cl, %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    andl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    andl %ebx, %edi
 ; X86-NEXT:    pushl %ebx
-; X86-NEXT:    pushl %edx
+; X86-NEXT:    pushl %eax
 ; X86-NEXT:    calll use_i64@PLT
 ; X86-NEXT:    addl $8, %esp
 ; X86-NEXT:    movl %esi, %eax
@@ -180,18 +172,19 @@ define i64 @andnot_rotl_i64_multiuse(i64 %a0, i64 %a1) nounwind {
 ; X64-LABEL: andnot_rotl_i64_multiuse:
 ; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rbx
-; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    movq %rdx, %rcx
 ; X64-NEXT:    movq %rdi, %rbx
-; X64-NEXT:    notq %rdi
+; X64-NEXT:    notq %rsi
 ; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
-; X64-NEXT:    rolq %cl, %rdi
-; X64-NEXT:    andq %rdi, %rbx
+; X64-NEXT:    rolq %cl, %rsi
+; X64-NEXT:    andq %rsi, %rbx
+; X64-NEXT:    movq %rsi, %rdi
 ; X64-NEXT:    callq use_i64@PLT
 ; X64-NEXT:    movq %rbx, %rax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
-  %not = xor i64 %a0, -1
-  %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a1)
+  %not = xor i64 %a1, -1
+  %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a2)
   %and = and i64 %rot, %a0
   call void @use_i64(i64 %rot)
   ret i64 %and
@@ -201,130 +194,122 @@ define i64 @andnot_rotl_i64_multiuse(i64 %a0, i64 %a1) nounwind {
 ; Fold (and X, (rotr (not Y), Z))) -> (and X, (not (rotr Y, Z)))
 ;
 
-define i64 @andnot_rotr_i64(i64 %a0, i64 %a1) nounwind {
+define i64 @andnot_rotr_i64(i64 %a0, i64 %a1, i64 %a2) nounwind {
 ; X86-LABEL: andnot_rotr_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebx
-; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    notl %ebx
-; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    notl %esi
 ; X86-NEXT:    notl %edx
 ; X86-NEXT:    testb $32, %cl
-; X86-NEXT:    jne .LBB5_1
+; X86-NEXT:    je .LBB5_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    movl %edx, %eax
 ; X86-NEXT:    jmp .LBB5_3
 ; X86-NEXT:  .LBB5_1:
-; X86-NEXT:    movl %ebx, %eax
-; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:  .LBB5_3:
-; X86-NEXT:    movl %ebx, %edx
-; X86-NEXT:    shldl %cl, %eax, %edx
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    shrdl %cl, %eax, %edx
 ; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-NEXT:    shldl %cl, %ebx, %eax
-; X86-NEXT:    andl %edi, %eax
-; X86-NEXT:    andl %esi, %edx
+; X86-NEXT:    shrdl %cl, %esi, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
-; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotr_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rsi, %rcx
-; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    movq %rsi, %rax
 ; X64-NEXT:    notq %rax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $rcx
-; X64-NEXT:    rolq %cl, %rax
+; X64-NEXT:    rorq %cl, %rax
 ; X64-NEXT:    andq %rdi, %rax
 ; X64-NEXT:    retq
-  %not = xor i64 %a0, -1
-  %rot = tail call i64 @llvm.fshl.i64(i64 %not, i64 %not, i64 %a1)
+  %not = xor i64 %a1, -1
+  %rot = tail call i64 @llvm.fshr.i64(i64 %not, i64 %not, i64 %a2)
   %and = and i64 %rot, %a0
   ret i64 %and
 }
 
-define i32 @andnot_rotr_i32(i32 %a0, i32 %a1) nounwind {
+define i32 @andnot_rotr_i32(i32 %a0, i32 %a1, i32 %a2) nounwind {
 ; X86-LABEL: andnot_rotr_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
-; X86-NEXT:    roll %cl, %eax
-; X86-NEXT:    andl %edx, %eax
+; X86-NEXT:    rorl %cl, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotr_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    roll %cl, %eax
+; X64-NEXT:    rorl %cl, %eax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    retq
-  %not = xor i32 %a0, -1
-  %rot = tail call i32 @llvm.fshl.i32(i32 %not, i32 %not, i32 %a1)
+  %not = xor i32 %a1, -1
+  %rot = tail call i32 @llvm.fshr.i32(i32 %not, i32 %not, i32 %a2)
   %and = and i32 %rot, %a0
   ret i32 %and
 }
 
-define i16 @andnot_rotr_i16(i16 %a0, i16 %a1) nounwind {
+define i16 @andnot_rotr_i16(i16 %a0, i16 %a1, i16 %a2) nounwind {
 ; X86-LABEL: andnot_rotr_i16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
-; X86-NEXT:    rolw %cl, %ax
-; X86-NEXT:    andl %edx, %eax
+; X86-NEXT:    rorw %cl, %ax
+; X86-NEXT:    andw {{[0-9]+}}(%esp), %ax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotr_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    rolw %cl, %ax
+; X64-NEXT:    rorw %cl, %ax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-  %not = xor i16 %a0, -1
-  %rot = tail call i16 @llvm.fshl.i16(i16 %not, i16 %not, i16 %a1)
+  %not = xor i16 %a1, -1
+  %rot = tail call i16 @llvm.fshr.i16(i16 %not, i16 %not, i16 %a2)
   %and = and i16 %rot, %a0
   ret i16 %and
 }
 
-define i8 @andnot_rotr_i8(i8 %a0, i8 %a1) nounwind {
+define i8 @andnot_rotr_i8(i8 %a0, i8 %a1, i8 %a2) nounwind {
 ; X86-LABEL: andnot_rotr_i8:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notb %al
-; X86-NEXT:    rolb %cl, %al
-; X86-NEXT:    andb %dl, %al
+; X86-NEXT:    rorb %cl, %al
+; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_rotr_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notb %al
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    rolb %cl, %al
+; X64-NEXT:    rorb %cl, %al
 ; X64-NEXT:    andb %dil, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
-  %not = xor i8 %a0, -1
-  %rot = tail call i8 @llvm.fshl.i8(i8 %not, i8 %not, i8 %a1)
+  %not = xor i8 %a1, -1
+  %rot = tail call i8 @llvm.fshr.i8(i8 %not, i8 %not, i8 %a2)
   %and = and i8 %rot, %a0
   ret i8 %and
 }
@@ -333,76 +318,73 @@ define i8 @andnot_rotr_i8(i8 %a0, i8 %a1) nounwind {
 ; Fold (and X, (bswap (not Y)))) -> (and X, (not (bswap Y)))
 ;
 
-define i64 @andnot_bswap_i64(i64 %a0) nounwind {
+define i64 @andnot_bswap_i64(i64 %a0, i64 %a1) nounwind {
 ; X86-LABEL: andnot_bswap_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    notl %eax
 ; X86-NEXT:    notl %edx
 ; X86-NEXT:    bswapl %edx
-; X86-NEXT:    andl %eax, %edx
-; X86-NEXT:    notl %eax
 ; X86-NEXT:    bswapl %eax
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bswap_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq %rsi, %rax
 ; X64-NEXT:    notq %rax
 ; X64-NEXT:    bswapq %rax
 ; X64-NEXT:    andq %rdi, %rax
 ; X64-NEXT:    retq
-  %not = xor i64 %a0, -1
+  %not = xor i64 %a1, -1
   %bswap = tail call i64 @llvm.bswap.i64(i64 %not)
   %and = and i64 %bswap, %a0
   ret i64 %and
 }
 
-define i32 @andnot_bswap_i32(i32 %a0) nounwind {
+define i32 @andnot_bswap_i32(i32 %a0, i32 %a1) nounwind {
 ; X86-LABEL: andnot_bswap_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    bswapl %eax
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bswap_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    bswapl %eax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    retq
-  %not = xor i32 %a0, -1
+  %not = xor i32 %a1, -1
   %bswap = tail call i32 @llvm.bswap.i32(i32 %not)
   %and = and i32 %bswap, %a0
   ret i32 %and
 }
 
-define i16 @andnot_bswap_i16(i16 %a0) nounwind {
+define i16 @andnot_bswap_i16(i16 %a0, i16 %a1) nounwind {
 ; X86-LABEL: andnot_bswap_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    rolw $8, %ax
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    andw {{[0-9]+}}(%esp), %ax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bswap_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    notl %eax
 ; X64-NEXT:    rolw $8, %ax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-  %not = xor i16 %a0, -1
+  %not = xor i16 %a1, -1
   %bswap = tail call i16 @llvm.bswap.i16(i16 %not)
   %and = and i16 %bswap, %a0
   ret i16 %and
@@ -412,72 +394,68 @@ define i16 @andnot_bswap_i16(i16 %a0) nounwind {
 ; Fold (and X, (bitreverse (not Y)))) -> (and X, (not (bitreverse Y)))
 ;
 
-define i64 @andnot_bitreverse_i64(i64 %a0) nounwind {
+define i64 @andnot_bitreverse_i64(i64 %a0, i64 %a1) nounwind {
 ; X86-LABEL: andnot_bitreverse_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    notl %ecx
+; X86-NEXT:    bswapl %ecx
 ; X86-NEXT:    movl %ecx, %edx
-; X86-NEXT:    notl %edx
-; X86-NEXT:    bswapl %edx
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    andl $252645135, %esi # imm = 0xF0F0F0F
-; X86-NEXT:    shll $4, %esi
-; X86-NEXT:    shrl $4, %edx
 ; X86-NEXT:    andl $252645135, %edx # imm = 0xF0F0F0F
-; X86-NEXT:    orl %esi, %edx
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    andl $858993459, %esi # imm = 0x33333333
-; X86-NEXT:    shrl $2, %edx
+; X86-NEXT:    shll $4, %edx
+; X86-NEXT:    shrl $4, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    andl $858993459, %edx # imm = 0x33333333
-; X86-NEXT:    leal (%edx,%esi,4), %edx
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    andl $1431655765, %esi # imm = 0x55555555
-; X86-NEXT:    shrl %edx
+; X86-NEXT:    shrl $2, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    leal (%ecx,%edx,4), %ecx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    andl $1431655765, %edx # imm = 0x55555555
-; X86-NEXT:    leal (%edx,%esi,2), %edx
-; X86-NEXT:    andl %eax, %edx
-; X86-NEXT:    notl %eax
+; X86-NEXT:    shrl %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    leal (%ecx,%edx,2), %edx
 ; X86-NEXT:    bswapl %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    andl $252645135, %esi # imm = 0xF0F0F0F
-; X86-NEXT:    shll $4, %esi
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %esi, %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    andl $858993459, %esi # imm = 0x33333333
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
 ; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; X86-NEXT:    leal (%eax,%esi,4), %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    andl $1431655765, %esi # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
 ; X86-NEXT:    shrl %eax
 ; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
-; X86-NEXT:    leal (%eax,%esi,2), %eax
-; X86-NEXT:    andl %ecx, %eax
-; X86-NEXT:    popl %esi
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bitreverse_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    notq %rax
-; X64-NEXT:    bswapq %rax
-; X64-NEXT:    movq %rax, %rcx
-; X64-NEXT:    shrq $4, %rcx
-; X64-NEXT:    movabsq $1085102592571150095, %rdx # imm = 0xF0F0F0F0F0F0F0F
-; X64-NEXT:    andq %rdx, %rcx
-; X64-NEXT:    andq %rdx, %rax
-; X64-NEXT:    shlq $4, %rax
-; X64-NEXT:    orq %rcx, %rax
-; X64-NEXT:    movabsq $3689348814741910323, %rcx # imm = 0x3333333333333333
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    andq %rcx, %rdx
-; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    notq %rsi
+; X64-NEXT:    bswapq %rsi
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
 ; X64-NEXT:    andq %rcx, %rax
-; X64-NEXT:    leaq (%rax,%rdx,4), %rax
+; X64-NEXT:    andq %rcx, %rsi
+; X64-NEXT:    shlq $4, %rsi
+; X64-NEXT:    orq %rax, %rsi
+; X64-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NEXT:    movq %rsi, %rcx
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    shrq $2, %rsi
+; X64-NEXT:    andq %rax, %rsi
+; X64-NEXT:    leaq (%rsi,%rcx,4), %rax
 ; X64-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
 ; X64-NEXT:    movq %rax, %rdx
 ; X64-NEXT:    andq %rcx, %rdx
@@ -486,54 +464,53 @@ define i64 @andnot_bitreverse_i64(i64 %a0) nounwind {
 ; X64-NEXT:    leaq (%rax,%rdx,2), %rax
 ; X64-NEXT:    andq %rdi, %rax
 ; X64-NEXT:    retq
-  %not = xor i64 %a0, -1
+  %not = xor i64 %a1, -1
   %bitrev = tail call i64 @llvm.bitreverse.i64(i64 %not)
   %and = and i64 %bitrev, %a0
   ret i64 %and
 }
 
-define i32 @andnot_bitreverse_i32(i32 %a0) nounwind {
+define i32 @andnot_bitreverse_i32(i32 %a0, i32 %a1) nounwind {
 ; X86-LABEL: andnot_bitreverse_i32:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    bswapl %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $252645135, %edx # imm = 0xF0F0F0F
-; X86-NEXT:    shll $4, %edx
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %edx, %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $858993459, %edx # imm = 0x33333333
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
 ; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; X86-NEXT:    leal (%eax,%edx,4), %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $1431655765, %edx # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
 ; X86-NEXT:    shrl %eax
 ; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
-; X86-NEXT:    leal (%eax,%edx,2), %eax
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bitreverse_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    notl %eax
-; X64-NEXT:    bswapl %eax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
-; X64-NEXT:    shll $4, %ecx
-; X64-NEXT:    shrl $4, %eax
+; X64-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-NEXT:    notl %esi
+; X64-NEXT:    bswapl %esi
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X64-NEXT:    orl %ecx, %eax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    andl $858993459, %ecx # imm = 0x33333333
-; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    shrl $4, %esi
+; X64-NEXT:    andl $252645135, %esi # imm = 0xF0F0F0F
+; X64-NEXT:    orl %eax, %esi
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    shrl $2, %esi
+; X64-NEXT:    andl $858993459, %esi # imm = 0x33333333
+; X64-NEXT:    leal (%rsi,%rax,4), %eax
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
 ; X64-NEXT:    shrl %eax
@@ -541,55 +518,54 @@ define i32 @andnot_bitreverse_i32(i32 %a0) nounwind {
 ; X64-NEXT:    leal (%rax,%rcx,2), %eax
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    retq
-  %not = xor i32 %a0, -1
+  %not = xor i32 %a1, -1
   %bitrev = tail call i32 @llvm.bitreverse.i32(i32 %not)
   %and = and i32 %bitrev, %a0
   ret i32 %and
 }
 
-define i16 @andnot_bitreverse_i16(i16 %a0) nounwind {
+define i16 @andnot_bitreverse_i16(i16 %a0, i16 %a1) nounwind {
 ; X86-LABEL: andnot_bitreverse_i16:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notl %eax
 ; X86-NEXT:    rolw $8, %ax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $3855, %edx # imm = 0xF0F
-; X86-NEXT:    shll $4, %edx
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $3855, %ecx # imm = 0xF0F
+; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $3855, %eax # imm = 0xF0F
-; X86-NEXT:    orl %edx, %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $13107, %edx # imm = 0x3333
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $13107, %ecx # imm = 0x3333
 ; X86-NEXT:    shrl $2, %eax
 ; X86-NEXT:    andl $13107, %eax # imm = 0x3333
-; X86-NEXT:    leal (%eax,%edx,4), %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andl $21845, %edx # imm = 0x5555
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $21845, %ecx # imm = 0x5555
 ; X86-NEXT:    shrl %eax
 ; X86-NEXT:    andl $21845, %eax # imm = 0x5555
-; X86-NEXT:    leal (%eax,%edx,2), %eax
-; X86-NEXT:    andl %ecx, %eax
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    andw {{[0-9]+}}(%esp), %ax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bitreverse_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    notl %eax
-; X64-NEXT:    rolw $8, %ax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    andl $3855, %ecx # imm = 0xF0F
-; X64-NEXT:    shll $4, %ecx
-; X64-NEXT:    shrl $4, %eax
+; X64-NEXT:    # kill: def $esi killed $esi def $rsi
+; X64-NEXT:    notl %esi
+; X64-NEXT:    rolw $8, %si
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    andl $3855, %eax # imm = 0xF0F
-; X64-NEXT:    orl %ecx, %eax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    andl $13107, %ecx # imm = 0x3333
-; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    shrl $4, %esi
+; X64-NEXT:    andl $3855, %esi # imm = 0xF0F
+; X64-NEXT:    orl %eax, %esi
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    andl $13107, %eax # imm = 0x3333
-; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    shrl $2, %esi
+; X64-NEXT:    andl $13107, %esi # imm = 0x3333
+; X64-NEXT:    leal (%rsi,%rax,4), %eax
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andl $21845, %ecx # imm = 0x5555
 ; X64-NEXT:    shrl %eax
@@ -598,45 +574,43 @@ define i16 @andnot_bitreverse_i16(i16 %a0) nounwind {
 ; X64-NEXT:    andl %edi, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-  %not = xor i16 %a0, -1
+  %not = xor i16 %a1, -1
   %bitrev = tail call i16 @llvm.bitreverse.i16(i16 %not)
   %and = and i16 %bitrev, %a0
   ret i16 %and
 }
 
-define i8 @andnot_bitreverse_i8(i8 %a0) nounwind {
+define i8 @andnot_bitreverse_i8(i8 %a0, i8 %a1) nounwind {
 ; X86-LABEL: andnot_bitreverse_i8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    notb %al
 ; X86-NEXT:    rolb $4, %al
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andb $51, %dl
-; X86-NEXT:    shlb $2, %dl
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andb $51, %cl
+; X86-NEXT:    shlb $2, %cl
 ; X86-NEXT:    shrb $2, %al
 ; X86-NEXT:    andb $51, %al
-; X86-NEXT:    orb %dl, %al
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    andb $85, %dl
-; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andb $85, %cl
+; X86-NEXT:    addb %cl, %cl
 ; X86-NEXT:    shrb %al
 ; X86-NEXT:    andb $85, %al
-; X86-NEXT:    orb %dl, %al
-; X86-NEXT:    andb %cl, %al
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: andnot_bitreverse_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    notb %al
-; X64-NEXT:    rolb $4, %al
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    andb $51, %cl
-; X64-NEXT:    shlb $2, %cl
-; X64-NEXT:    shrb $2, %al
+; X64-NEXT:    notb %sil
+; X64-NEXT:    rolb $4, %sil
+; X64-NEXT:    movl %esi, %eax
 ; X64-NEXT:    andb $51, %al
-; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    shlb $2, %al
+; X64-NEXT:    shrb $2, %sil
+; X64-NEXT:    andb $51, %sil
+; X64-NEXT:    orb %sil, %al
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andb $85, %cl
 ; X64-NEXT:    addb %cl, %cl
@@ -645,7 +619,7 @@ define i8 @andnot_bitreverse_i8(i8 %a0) nounwind {
 ; X64-NEXT:    orb %cl, %al
 ; X64-NEXT:    andb %dil, %al
 ; X64-NEXT:    retq
-  %not = xor i8 %a0, -1
+  %not = xor i8 %a1, -1
   %bitrev = tail call i8 @llvm.bitreverse.i8(i8 %not)
   %and = and i8 %bitrev, %a0
   ret i8 %and

