[llvm] r334183 - [X86] Regenerate rotate tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jun 7 03:13:09 PDT 2018


Author: rksimon
Date: Thu Jun  7 03:13:09 2018
New Revision: 334183

URL: http://llvm.org/viewvc/llvm-project?rev=334183&view=rev
Log:
[X86] Regenerate rotate tests

Add 32-bit tests to show missed SHLD/SHRD cases
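
To illustrate the "missed SHLD/SHRD cases" above, compare a constant-amount
i64 rotate with a variable-amount one on i686. This sketch mirrors the
rotli64 and rotl64 tests in rotate.ll below; the function names @rot_const
and @rot_var are illustrative only and not part of the commit.

; Constant-amount i64 rotate: per the X86 checks below (rotli64), i686
; already lowers this to a pair of SHLDL instructions.
define i64 @rot_const(i64 %A) nounwind {
  %hi = shl i64 %A, 5
  %lo = lshr i64 %A, 59
  %r  = or i64 %hi, %lo
  ret i64 %r
}

; Variable-amount i64 rotate: per the X86 checks below (rotl64), i686
; currently expands this into shl/shld and shr/shrd halves plus a
; testb $32 of the amount and branches, rather than a tighter
; SHLD/SHRD-based sequence.
define i64 @rot_var(i64 %A, i8 %Amt) nounwind {
  %amt = zext i8 %Amt to i64
  %hi  = shl i64 %A, %amt
  %inv = sub i8 64, %Amt
  %rsh = zext i8 %inv to i64
  %lo  = lshr i64 %A, %rsh
  %r   = or i64 %hi, %lo
  ret i64 %r
}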

Modified:
    llvm/trunk/test/CodeGen/X86/rotate.ll
    llvm/trunk/test/CodeGen/X86/rotate2.ll
    llvm/trunk/test/CodeGen/X86/rotate4.ll

Modified: llvm/trunk/test/CodeGen/X86/rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate.ll?rev=334183&r1=334182&r2=334183&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rotate.ll Thu Jun  7 03:13:09 2018
@@ -1,51 +1,51 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux | FileCheck %s --check-prefix=32
-; RUN: llc < %s -mtriple=x86_64-unknown-linux | FileCheck %s --check-prefix=64
+; RUN: llc < %s -mtriple=i686-unknown-linux | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-linux | FileCheck %s --check-prefixes=X64
 
 define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotl64:
-; 32:       # %bb.0:
-; 32-NEXT:    pushl %ebx
-; 32-NEXT:    pushl %edi
-; 32-NEXT:    pushl %esi
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; 32-NEXT:    movl %esi, %eax
-; 32-NEXT:    shll %cl, %eax
-; 32-NEXT:    movl %edi, %edx
-; 32-NEXT:    shldl %cl, %esi, %edx
-; 32-NEXT:    testb $32, %cl
-; 32-NEXT:    je .LBB0_2
-; 32-NEXT:  # %bb.1:
-; 32-NEXT:    movl %eax, %edx
-; 32-NEXT:    xorl %eax, %eax
-; 32-NEXT:  .LBB0_2:
-; 32-NEXT:    movb $64, %ch
-; 32-NEXT:    subb %cl, %ch
-; 32-NEXT:    movl %edi, %ebx
-; 32-NEXT:    movb %ch, %cl
-; 32-NEXT:    shrl %cl, %ebx
-; 32-NEXT:    shrdl %cl, %edi, %esi
-; 32-NEXT:    testb $32, %ch
-; 32-NEXT:    je .LBB0_4
-; 32-NEXT:  # %bb.3:
-; 32-NEXT:    movl %ebx, %esi
-; 32-NEXT:    xorl %ebx, %ebx
-; 32-NEXT:  .LBB0_4:
-; 32-NEXT:    orl %ebx, %edx
-; 32-NEXT:    orl %esi, %eax
-; 32-NEXT:    popl %esi
-; 32-NEXT:    popl %edi
-; 32-NEXT:    popl %ebx
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl64:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rolq %cl, %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotl64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    shldl %cl, %esi, %edx
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB0_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:  .LBB0_2:
+; X86-NEXT:    movb $64, %ch
+; X86-NEXT:    subb %cl, %ch
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shrl %cl, %ebx
+; X86-NEXT:    shrdl %cl, %edi, %esi
+; X86-NEXT:    testb $32, %ch
+; X86-NEXT:    je .LBB0_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:  .LBB0_4:
+; X86-NEXT:    orl %ebx, %edx
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolq %cl, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%shift.upgrd.1 = zext i8 %Amt to i64
 	%B = shl i64 %A, %shift.upgrd.1
 	%Amt2 = sub i8 64, %Amt
@@ -56,49 +56,49 @@ define i64 @rotl64(i64 %A, i8 %Amt) noun
 }
 
 define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotr64:
-; 32:       # %bb.0:
-; 32-NEXT:    pushl %ebx
-; 32-NEXT:    pushl %edi
-; 32-NEXT:    pushl %esi
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; 32-NEXT:    movl %esi, %edx
-; 32-NEXT:    shrl %cl, %edx
-; 32-NEXT:    movl %edi, %eax
-; 32-NEXT:    shrdl %cl, %esi, %eax
-; 32-NEXT:    testb $32, %cl
-; 32-NEXT:    je .LBB1_2
-; 32-NEXT:  # %bb.1:
-; 32-NEXT:    movl %edx, %eax
-; 32-NEXT:    xorl %edx, %edx
-; 32-NEXT:  .LBB1_2:
-; 32-NEXT:    movb $64, %ch
-; 32-NEXT:    subb %cl, %ch
-; 32-NEXT:    movl %edi, %ebx
-; 32-NEXT:    movb %ch, %cl
-; 32-NEXT:    shll %cl, %ebx
-; 32-NEXT:    shldl %cl, %edi, %esi
-; 32-NEXT:    testb $32, %ch
-; 32-NEXT:    je .LBB1_4
-; 32-NEXT:  # %bb.3:
-; 32-NEXT:    movl %ebx, %esi
-; 32-NEXT:    xorl %ebx, %ebx
-; 32-NEXT:  .LBB1_4:
-; 32-NEXT:    orl %esi, %edx
-; 32-NEXT:    orl %ebx, %eax
-; 32-NEXT:    popl %esi
-; 32-NEXT:    popl %edi
-; 32-NEXT:    popl %ebx
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr64:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rorq %cl, %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotr64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    shrl %cl, %edx
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    shrdl %cl, %esi, %eax
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB1_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB1_2:
+; X86-NEXT:    movb $64, %ch
+; X86-NEXT:    subb %cl, %ch
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:    shll %cl, %ebx
+; X86-NEXT:    shldl %cl, %edi, %esi
+; X86-NEXT:    testb $32, %ch
+; X86-NEXT:    je .LBB1_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:  .LBB1_4:
+; X86-NEXT:    orl %esi, %edx
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorq %cl, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%shift.upgrd.3 = zext i8 %Amt to i64
 	%B = lshr i64 %A, %shift.upgrd.3
 	%Amt2 = sub i8 64, %Amt
@@ -109,20 +109,20 @@ define i64 @rotr64(i64 %A, i8 %Amt) noun
 }
 
 define i64 @rotli64(i64 %A) nounwind {
-; 32-LABEL: rotli64:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; 32-NEXT:    movl %ecx, %edx
-; 32-NEXT:    shldl $5, %eax, %edx
-; 32-NEXT:    shldl $5, %ecx, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotli64:
-; 64:       # %bb.0:
-; 64-NEXT:    rolq $5, %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotli64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    shldl $5, %eax, %edx
+; X86-NEXT:    shldl $5, %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotli64:
+; X64:       # %bb.0:
+; X64-NEXT:    rolq $5, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%B = shl i64 %A, 5
 	%C = lshr i64 %A, 59
 	%D = or i64 %B, %C
@@ -130,20 +130,20 @@ define i64 @rotli64(i64 %A) nounwind {
 }
 
 define i64 @rotri64(i64 %A) nounwind {
-; 32-LABEL: rotri64:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; 32-NEXT:    movl %ecx, %eax
-; 32-NEXT:    shldl $27, %edx, %eax
-; 32-NEXT:    shldl $27, %ecx, %edx
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotri64:
-; 64:       # %bb.0:
-; 64-NEXT:    rolq $59, %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotri64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    shldl $27, %edx, %eax
+; X86-NEXT:    shldl $27, %ecx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotri64:
+; X64:       # %bb.0:
+; X64-NEXT:    rolq $59, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%B = lshr i64 %A, 5
 	%C = shl i64 %A, 59
 	%D = or i64 %B, %C
@@ -151,20 +151,20 @@ define i64 @rotri64(i64 %A) nounwind {
 }
 
 define i64 @rotl1_64(i64 %A) nounwind {
-; 32-LABEL: rotl1_64:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; 32-NEXT:    movl %ecx, %edx
-; 32-NEXT:    shldl $1, %eax, %edx
-; 32-NEXT:    shldl $1, %ecx, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl1_64:
-; 64:       # %bb.0:
-; 64-NEXT:    rolq %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotl1_64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    shldl $1, %eax, %edx
+; X86-NEXT:    shldl $1, %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl1_64:
+; X64:       # %bb.0:
+; X64-NEXT:    rolq %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%B = shl i64 %A, 1
 	%C = lshr i64 %A, 63
 	%D = or i64 %B, %C
@@ -172,20 +172,20 @@ define i64 @rotl1_64(i64 %A) nounwind {
 }
 
 define i64 @rotr1_64(i64 %A) nounwind {
-; 32-LABEL: rotr1_64:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; 32-NEXT:    movl %ecx, %eax
-; 32-NEXT:    shldl $31, %edx, %eax
-; 32-NEXT:    shldl $31, %ecx, %edx
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_64:
-; 64:       # %bb.0:
-; 64-NEXT:    rorq %rdi
-; 64-NEXT:    movq %rdi, %rax
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    shldl $31, %edx, %eax
+; X86-NEXT:    shldl $31, %ecx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_64:
+; X64:       # %bb.0:
+; X64-NEXT:    rorq %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 	%B = shl i64 %A, 63
 	%C = lshr i64 %A, 1
 	%D = or i64 %B, %C
@@ -193,19 +193,19 @@ define i64 @rotr1_64(i64 %A) nounwind {
 }
 
 define i32 @rotl32(i32 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotl32:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    roll %cl, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl32:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    roll %cl, %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%shift.upgrd.1 = zext i8 %Amt to i32
 	%B = shl i32 %A, %shift.upgrd.1
 	%Amt2 = sub i8 32, %Amt
@@ -216,19 +216,19 @@ define i32 @rotl32(i32 %A, i8 %Amt) noun
 }
 
 define i32 @rotr32(i32 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotr32:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorl %cl, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr32:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rorl %cl, %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorl %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%shift.upgrd.3 = zext i8 %Amt to i32
 	%B = lshr i32 %A, %shift.upgrd.3
 	%Amt2 = sub i8 32, %Amt
@@ -239,17 +239,17 @@ define i32 @rotr32(i32 %A, i8 %Amt) noun
 }
 
 define i32 @rotli32(i32 %A) nounwind {
-; 32-LABEL: rotli32:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    roll $5, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotli32:
-; 64:       # %bb.0:
-; 64-NEXT:    roll $5, %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotli32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll $5, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotli32:
+; X64:       # %bb.0:
+; X64-NEXT:    roll $5, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i32 %A, 5
 	%C = lshr i32 %A, 27
 	%D = or i32 %B, %C
@@ -257,17 +257,17 @@ define i32 @rotli32(i32 %A) nounwind {
 }
 
 define i32 @rotri32(i32 %A) nounwind {
-; 32-LABEL: rotri32:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    roll $27, %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotri32:
-; 64:       # %bb.0:
-; 64-NEXT:    roll $27, %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotri32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll $27, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotri32:
+; X64:       # %bb.0:
+; X64-NEXT:    roll $27, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i32 %A, 5
 	%C = shl i32 %A, 27
 	%D = or i32 %B, %C
@@ -275,17 +275,17 @@ define i32 @rotri32(i32 %A) nounwind {
 }
 
 define i32 @rotl1_32(i32 %A) nounwind {
-; 32-LABEL: rotl1_32:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    roll %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl1_32:
-; 64:       # %bb.0:
-; 64-NEXT:    roll %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl1_32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl1_32:
+; X64:       # %bb.0:
+; X64-NEXT:    roll %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i32 %A, 1
 	%C = lshr i32 %A, 31
 	%D = or i32 %B, %C
@@ -293,17 +293,17 @@ define i32 @rotl1_32(i32 %A) nounwind {
 }
 
 define i32 @rotr1_32(i32 %A) nounwind {
-; 32-LABEL: rotr1_32:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorl %eax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_32:
-; 64:       # %bb.0:
-; 64-NEXT:    rorl %edi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_32:
+; X64:       # %bb.0:
+; X64-NEXT:    rorl %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i32 %A, 31
 	%C = lshr i32 %A, 1
 	%D = or i32 %B, %C
@@ -311,19 +311,19 @@ define i32 @rotr1_32(i32 %A) nounwind {
 }
 
 define i16 @rotl16(i16 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotl16:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rolw %cl, %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl16:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rolw %cl, %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw %cl, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolw %cl, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%shift.upgrd.5 = zext i8 %Amt to i16
 	%B = shl i16 %A, %shift.upgrd.5
 	%Amt2 = sub i8 16, %Amt
@@ -334,19 +334,19 @@ define i16 @rotl16(i16 %A, i8 %Amt) noun
 }
 
 define i16 @rotr16(i16 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotr16:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorw %cl, %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr16:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rorw %cl, %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorw %cl, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorw %cl, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%shift.upgrd.7 = zext i8 %Amt to i16
 	%B = lshr i16 %A, %shift.upgrd.7
 	%Amt2 = sub i8 16, %Amt
@@ -357,17 +357,17 @@ define i16 @rotr16(i16 %A, i8 %Amt) noun
 }
 
 define i16 @rotli16(i16 %A) nounwind {
-; 32-LABEL: rotli16:
-; 32:       # %bb.0:
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rolw $5, %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotli16:
-; 64:       # %bb.0:
-; 64-NEXT:    rolw $5, %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotli16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw $5, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotli16:
+; X64:       # %bb.0:
+; X64-NEXT:    rolw $5, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i16 %A, 5
 	%C = lshr i16 %A, 11
 	%D = or i16 %B, %C
@@ -375,17 +375,17 @@ define i16 @rotli16(i16 %A) nounwind {
 }
 
 define i16 @rotri16(i16 %A) nounwind {
-; 32-LABEL: rotri16:
-; 32:       # %bb.0:
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rolw $11, %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotri16:
-; 64:       # %bb.0:
-; 64-NEXT:    rolw $11, %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotri16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw $11, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotri16:
+; X64:       # %bb.0:
+; X64-NEXT:    rolw $11, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i16 %A, 5
 	%C = shl i16 %A, 11
 	%D = or i16 %B, %C
@@ -393,17 +393,17 @@ define i16 @rotri16(i16 %A) nounwind {
 }
 
 define i16 @rotl1_16(i16 %A) nounwind {
-; 32-LABEL: rotl1_16:
-; 32:       # %bb.0:
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rolw %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl1_16:
-; 64:       # %bb.0:
-; 64-NEXT:    rolw %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl1_16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl1_16:
+; X64:       # %bb.0:
+; X64-NEXT:    rolw %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i16 %A, 1
 	%C = lshr i16 %A, 15
 	%D = or i16 %B, %C
@@ -411,17 +411,17 @@ define i16 @rotl1_16(i16 %A) nounwind {
 }
 
 define i16 @rotr1_16(i16 %A) nounwind {
-; 32-LABEL: rotr1_16:
-; 32:       # %bb.0:
-; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorw %ax
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_16:
-; 64:       # %bb.0:
-; 64-NEXT:    rorw %di
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorw %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_16:
+; X64:       # %bb.0:
+; X64-NEXT:    rorw %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i16 %A, 1
 	%C = shl i16 %A, 15
 	%D = or i16 %B, %C
@@ -429,19 +429,19 @@ define i16 @rotr1_16(i16 %A) nounwind {
 }
 
 define i8 @rotl8(i8 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotl8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rolb %cl, %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl8:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rolb %cl, %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rolb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolb %cl, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i8 %A, %Amt
 	%Amt2 = sub i8 8, %Amt
 	%C = lshr i8 %A, %Amt2
@@ -450,19 +450,19 @@ define i8 @rotl8(i8 %A, i8 %Amt) nounwin
 }
 
 define i8 @rotr8(i8 %A, i8 %Amt) nounwind {
-; 32-LABEL: rotr8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rorb %cl, %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr8:
-; 64:       # %bb.0:
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rorb %cl, %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rorb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorb %cl, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i8 %A, %Amt
 	%Amt2 = sub i8 8, %Amt
 	%C = shl i8 %A, %Amt2
@@ -471,17 +471,17 @@ define i8 @rotr8(i8 %A, i8 %Amt) nounwin
 }
 
 define i8 @rotli8(i8 %A) nounwind {
-; 32-LABEL: rotli8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rolb $5, %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotli8:
-; 64:       # %bb.0:
-; 64-NEXT:    rolb $5, %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotli8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rolb $5, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotli8:
+; X64:       # %bb.0:
+; X64-NEXT:    rolb $5, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i8 %A, 5
 	%C = lshr i8 %A, 3
 	%D = or i8 %B, %C
@@ -489,17 +489,17 @@ define i8 @rotli8(i8 %A) nounwind {
 }
 
 define i8 @rotri8(i8 %A) nounwind {
-; 32-LABEL: rotri8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rolb $3, %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotri8:
-; 64:       # %bb.0:
-; 64-NEXT:    rolb $3, %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotri8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rolb $3, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotri8:
+; X64:       # %bb.0:
+; X64-NEXT:    rolb $3, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i8 %A, 5
 	%C = shl i8 %A, 3
 	%D = or i8 %B, %C
@@ -507,17 +507,17 @@ define i8 @rotri8(i8 %A) nounwind {
 }
 
 define i8 @rotl1_8(i8 %A) nounwind {
-; 32-LABEL: rotl1_8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rolb %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotl1_8:
-; 64:       # %bb.0:
-; 64-NEXT:    rolb %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotl1_8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rolb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotl1_8:
+; X64:       # %bb.0:
+; X64-NEXT:    rolb %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = shl i8 %A, 1
 	%C = lshr i8 %A, 7
 	%D = or i8 %B, %C
@@ -525,17 +525,17 @@ define i8 @rotl1_8(i8 %A) nounwind {
 }
 
 define i8 @rotr1_8(i8 %A) nounwind {
-; 32-LABEL: rotr1_8:
-; 32:       # %bb.0:
-; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; 32-NEXT:    rorb %al
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_8:
-; 64:       # %bb.0:
-; 64-NEXT:    rorb %dil
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rorb %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_8:
+; X64:       # %bb.0:
+; X64-NEXT:    rorb %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 	%B = lshr i8 %A, 1
 	%C = shl i8 %A, 7
 	%D = or i8 %B, %C
@@ -543,24 +543,24 @@ define i8 @rotr1_8(i8 %A) nounwind {
 }
 
 define void @rotr1_64_mem(i64* %Aptr) nounwind {
-; 32-LABEL: rotr1_64_mem:
-; 32:       # %bb.0:
-; 32-NEXT:    pushl %esi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    movl (%eax), %ecx
-; 32-NEXT:    movl 4(%eax), %edx
-; 32-NEXT:    movl %edx, %esi
-; 32-NEXT:    shldl $31, %ecx, %esi
-; 32-NEXT:    shldl $31, %edx, %ecx
-; 32-NEXT:    movl %ecx, 4(%eax)
-; 32-NEXT:    movl %esi, (%eax)
-; 32-NEXT:    popl %esi
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_64_mem:
-; 64:       # %bb.0:
-; 64-NEXT:    rorq (%rdi)
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_64_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %ecx
+; X86-NEXT:    movl 4(%eax), %edx
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    shldl $31, %ecx, %esi
+; X86-NEXT:    shldl $31, %edx, %ecx
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl %esi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_64_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    rorq (%rdi)
+; X64-NEXT:    retq
 
   %A = load i64, i64 *%Aptr
   %B = shl i64 %A, 63
@@ -571,16 +571,16 @@ define void @rotr1_64_mem(i64* %Aptr) no
 }
 
 define void @rotr1_32_mem(i32* %Aptr) nounwind {
-; 32-LABEL: rotr1_32_mem:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorl (%eax)
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_32_mem:
-; 64:       # %bb.0:
-; 64-NEXT:    rorl (%rdi)
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_32_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_32_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    rorl (%rdi)
+; X64-NEXT:    retq
   %A = load i32, i32 *%Aptr
   %B = shl i32 %A, 31
   %C = lshr i32 %A, 1
@@ -590,16 +590,16 @@ define void @rotr1_32_mem(i32* %Aptr) no
 }
 
 define void @rotr1_16_mem(i16* %Aptr) nounwind {
-; 32-LABEL: rotr1_16_mem:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorw (%eax)
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_16_mem:
-; 64:       # %bb.0:
-; 64-NEXT:    rorw (%rdi)
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_16_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorw (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_16_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    rorw (%rdi)
+; X64-NEXT:    retq
   %A = load i16, i16 *%Aptr
   %B = shl i16 %A, 15
   %C = lshr i16 %A, 1
@@ -609,16 +609,16 @@ define void @rotr1_16_mem(i16* %Aptr) no
 }
 
 define void @rotr1_8_mem(i8* %Aptr) nounwind {
-; 32-LABEL: rotr1_8_mem:
-; 32:       # %bb.0:
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; 32-NEXT:    rorb (%eax)
-; 32-NEXT:    retl
-;
-; 64-LABEL: rotr1_8_mem:
-; 64:       # %bb.0:
-; 64-NEXT:    rorb (%rdi)
-; 64-NEXT:    retq
+; X86-LABEL: rotr1_8_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorb (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotr1_8_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    rorb (%rdi)
+; X64-NEXT:    retq
   %A = load i8, i8 *%Aptr
   %B = shl i8 %A, 7
   %C = lshr i8 %A, 1
@@ -628,46 +628,46 @@ define void @rotr1_8_mem(i8* %Aptr) noun
 }
 
 define i64 @truncated_rot(i64 %x, i32 %amt) nounwind {
-; 32-LABEL: truncated_rot:
-; 32:       # %bb.0: # %entry
-; 32-NEXT:    pushl %ebx
-; 32-NEXT:    pushl %edi
-; 32-NEXT:    pushl %esi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; 32-NEXT:    movl %esi, %eax
-; 32-NEXT:    shll %cl, %eax
-; 32-NEXT:    testb $32, %cl
-; 32-NEXT:    movl $0, %ebx
-; 32-NEXT:    jne .LBB28_2
-; 32-NEXT:  # %bb.1: # %entry
-; 32-NEXT:    movl %eax, %ebx
-; 32-NEXT:  .LBB28_2: # %entry
-; 32-NEXT:    movl $64, %edx
-; 32-NEXT:    subl %ecx, %edx
-; 32-NEXT:    movl %edi, %eax
-; 32-NEXT:    movl %edx, %ecx
-; 32-NEXT:    shrl %cl, %eax
-; 32-NEXT:    shrdl %cl, %edi, %esi
-; 32-NEXT:    testb $32, %dl
-; 32-NEXT:    jne .LBB28_4
-; 32-NEXT:  # %bb.3: # %entry
-; 32-NEXT:    movl %esi, %eax
-; 32-NEXT:  .LBB28_4: # %entry
-; 32-NEXT:    orl %ebx, %eax
-; 32-NEXT:    xorl %edx, %edx
-; 32-NEXT:    popl %esi
-; 32-NEXT:    popl %edi
-; 32-NEXT:    popl %ebx
-; 32-NEXT:    retl
-;
-; 64-LABEL: truncated_rot:
-; 64:       # %bb.0: # %entry
-; 64-NEXT:    movl %esi, %ecx
-; 64-NEXT:    rolq %cl, %rdi
-; 64-NEXT:    movl %edi, %eax
-; 64-NEXT:    retq
+; X86-LABEL: truncated_rot:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    jne .LBB28_2
+; X86-NEXT:  # %bb.1: # %entry
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:  .LBB28_2: # %entry
+; X86-NEXT:    movl $64, %edx
+; X86-NEXT:    subl %ecx, %edx
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    shrl %cl, %eax
+; X86-NEXT:    shrdl %cl, %edi, %esi
+; X86-NEXT:    testb $32, %dl
+; X86-NEXT:    jne .LBB28_4
+; X86-NEXT:  # %bb.3: # %entry
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:  .LBB28_4: # %entry
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+;
+; X64-LABEL: truncated_rot:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolq %cl, %rdi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
 entry:
   %sh_prom = zext i32 %amt to i64
   %shl = shl i64 %x, %sh_prom

Modified: llvm/trunk/test/CodeGen/X86/rotate2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate2.ll?rev=334183&r1=334182&r2=334183&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rotate2.ll Thu Jun  7 03:13:09 2018
@@ -1,6 +1,22 @@
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=corei7 | grep rol | count 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=X64
 
 define i64 @test1(i64 %x) nounwind  {
+; X86-LABEL: test1:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    shldl $9, %eax, %edx
+; X86-NEXT:    shldl $9, %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test1:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    rolq $9, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 entry:
 	%tmp2 = lshr i64 %x, 55		; <i64> [#uses=1]
 	%tmp4 = shl i64 %x, 9		; <i64> [#uses=1]
@@ -9,6 +25,19 @@ entry:
 }
 
 define i64 @test2(i32 %x) nounwind  {
+; X86-LABEL: test2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll $10, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test2:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    roll $10, %edi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
 entry:
 	%tmp2 = lshr i32 %x, 22		; <i32> [#uses=1]
 	%tmp4 = shl i32 %x, 10		; <i32> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/rotate4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate4.ll?rev=334183&r1=334182&r2=334183&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rotate4.ll Thu Jun  7 03:13:09 2018
@@ -1,16 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
 
 ; Check that we recognize this idiom for rotation too:
 ;    a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))
 
 define i32 @rotate_left_32(i32 %a, i32 %b) {
-; CHECK-LABEL: rotate_left_32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    roll %cl, %edi
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %and = and i32 %b, 31
   %shl = shl i32 %a, %and
   %t0 = sub i32 0, %b
@@ -21,12 +29,19 @@ define i32 @rotate_left_32(i32 %a, i32 %
 }
 
 define i32 @rotate_right_32(i32 %a, i32 %b) {
-; CHECK-LABEL: rotate_right_32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorl %cl, %edi
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorl %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %and = and i32 %b, 31
   %shl = lshr i32 %a, %and
   %t0 = sub i32 0, %b
@@ -37,12 +52,56 @@ define i32 @rotate_right_32(i32 %a, i32
 }
 
 define i64 @rotate_left_64(i64 %a, i64 %b) {
-; CHECK-LABEL: rotate_left_64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolq %cl, %rdi
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    .cfi_offset %esi, -16
+; X86-NEXT:    .cfi_offset %edi, -12
+; X86-NEXT:    .cfi_offset %ebx, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    shldl %cl, %esi, %edx
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    negl %ecx
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    shrl %cl, %ebx
+; X86-NEXT:    shrdl %cl, %edi, %esi
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB2_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:  .LBB2_4:
+; X86-NEXT:    orl %ebx, %edx
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolq %cl, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
   %and = and i64 %b, 63
   %shl = shl i64 %a, %and
   %t0 = sub i64 0, %b
@@ -53,12 +112,56 @@ define i64 @rotate_left_64(i64 %a, i64 %
 }
 
 define i64 @rotate_right_64(i64 %a, i64 %b) {
-; CHECK-LABEL: rotate_right_64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorq %cl, %rdi
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    .cfi_offset %esi, -16
+; X86-NEXT:    .cfi_offset %edi, -12
+; X86-NEXT:    .cfi_offset %ebx, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    shrl %cl, %edx
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    shrdl %cl, %esi, %eax
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB3_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB3_2:
+; X86-NEXT:    negl %ecx
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    shll %cl, %ebx
+; X86-NEXT:    shldl %cl, %edi, %esi
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB3_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:  .LBB3_4:
+; X86-NEXT:    orl %esi, %edx
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorq %cl, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
   %and = and i64 %b, 63
   %shl = lshr i64 %a, %and
   %t0 = sub i64 0, %b
@@ -71,11 +174,18 @@ define i64 @rotate_right_64(i64 %a, i64
 ; Also check mem operand.
 
 define void @rotate_left_m32(i32 *%pa, i32 %b) {
-; CHECK-LABEL: rotate_left_m32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    roll %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_m32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_m32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, (%rdi)
+; X64-NEXT:    retq
   %a = load i32, i32* %pa, align 16
   %and = and i32 %b, 31
   %shl = shl i32 %a, %and
@@ -88,11 +198,18 @@ define void @rotate_left_m32(i32 *%pa, i
 }
 
 define void @rotate_right_m32(i32 *%pa, i32 %b) {
-; CHECK-LABEL: rotate_right_m32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorl %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_m32:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_m32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorl %cl, (%rdi)
+; X64-NEXT:    retq
   %a = load i32, i32* %pa, align 16
   %and = and i32 %b, 31
   %shl = lshr i32 %a, %and
@@ -105,11 +222,63 @@ define void @rotate_right_m32(i32 *%pa,
 }
 
 define void @rotate_left_m64(i64 *%pa, i64 %b) {
-; CHECK-LABEL: rotate_left_m64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolq %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_m64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 20
+; X86-NEXT:    .cfi_offset %esi, -20
+; X86-NEXT:    .cfi_offset %edi, -16
+; X86-NEXT:    .cfi_offset %ebx, -12
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %edx
+; X86-NEXT:    movl 4(%eax), %ebx
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    shll %cl, %esi
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    shldl %cl, %edx, %edi
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB6_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    xorl %esi, %esi
+; X86-NEXT:  .LBB6_2:
+; X86-NEXT:    negl %ecx
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:    shrl %cl, %ebp
+; X86-NEXT:    shrdl %cl, %ebx, %edx
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB6_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebp, %edx
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:  .LBB6_4:
+; X86-NEXT:    orl %ebp, %edi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    movl %esi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_m64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolq %cl, (%rdi)
+; X64-NEXT:    retq
   %a = load i64, i64* %pa, align 16
   %and = and i64 %b, 63
   %shl = shl i64 %a, %and
@@ -122,11 +291,63 @@ define void @rotate_left_m64(i64 *%pa, i
 }
 
 define void @rotate_right_m64(i64 *%pa, i64 %b) {
-; CHECK-LABEL: rotate_right_m64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorq %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_m64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 20
+; X86-NEXT:    .cfi_offset %esi, -20
+; X86-NEXT:    .cfi_offset %edi, -16
+; X86-NEXT:    .cfi_offset %ebx, -12
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %ebx
+; X86-NEXT:    movl 4(%eax), %edx
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    shrl %cl, %esi
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    shrdl %cl, %edx, %edi
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB7_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    xorl %esi, %esi
+; X86-NEXT:  .LBB7_2:
+; X86-NEXT:    negl %ecx
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:    shll %cl, %ebp
+; X86-NEXT:    shldl %cl, %ebx, %edx
+; X86-NEXT:    testb $32, %cl
+; X86-NEXT:    je .LBB7_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ebp, %edx
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:  .LBB7_4:
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    orl %ebp, %edi
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    .cfi_def_cfa_offset 16
+; X86-NEXT:    popl %edi
+; X86-NEXT:    .cfi_def_cfa_offset 12
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_m64:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorq %cl, (%rdi)
+; X64-NEXT:    retq
   %a = load i64, i64* %pa, align 16
   %and = and i64 %b, 63
   %shl = lshr i64 %a, %and
@@ -142,12 +363,19 @@ define void @rotate_right_m64(i64 *%pa,
 ; These patterns are produced by instcombine after r310509.
 
 define i8 @rotate_left_8(i8 %x, i32 %amount) {
-; CHECK-LABEL: rotate_left_8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolb %cl, %dil
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rolb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolb %cl, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %amt = trunc i32 %amount to i8
   %sub = sub i8 0, %amt
   %maskamt = and i8 %amt, 7
@@ -159,12 +387,19 @@ define i8 @rotate_left_8(i8 %x, i32 %amo
 }
 
 define i8 @rotate_right_8(i8 %x, i32 %amount) {
-; CHECK-LABEL: rotate_right_8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorb %cl, %dil
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    rorb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorb %cl, %dil
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %amt = trunc i32 %amount to i8
   %sub = sub i8 0, %amt
   %maskamt = and i8 %amt, 7
@@ -176,12 +411,19 @@ define i8 @rotate_right_8(i8 %x, i32 %am
 }
 
 define i16 @rotate_left_16(i16 %x, i32 %amount) {
-; CHECK-LABEL: rotate_left_16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolw %cl, %di
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw %cl, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolw %cl, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %amt = trunc i32 %amount to i16
   %sub = sub i16 0, %amt
   %maskamt = and i16 %amt, 15
@@ -193,12 +435,19 @@ define i16 @rotate_left_16(i16 %x, i32 %
 }
 
 define i16 @rotate_right_16(i16 %x, i32 %amount) {
-; CHECK-LABEL: rotate_right_16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorw %cl, %di
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorw %cl, %ax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorw %cl, %di
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %amt = trunc i32 %amount to i16
   %sub = sub i16 0, %amt
   %maskamt = and i16 %amt, 15
@@ -210,11 +459,18 @@ define i16 @rotate_right_16(i16 %x, i32
 }
 
 define void @rotate_left_m8(i8* %p, i32 %amount) {
-; CHECK-LABEL: rotate_left_m8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolb %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_m8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolb %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_m8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolb %cl, (%rdi)
+; X64-NEXT:    retq
   %x = load i8, i8* %p, align 1
   %amt = trunc i32 %amount to i8
   %sub = sub i8 0, %amt
@@ -228,11 +484,18 @@ define void @rotate_left_m8(i8* %p, i32
 }
 
 define void @rotate_right_m8(i8* %p, i32 %amount) {
-; CHECK-LABEL: rotate_right_m8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorb %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_m8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorb %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_m8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorb %cl, (%rdi)
+; X64-NEXT:    retq
   %x = load i8, i8* %p, align 1
   %amt = trunc i32 %amount to i8
   %sub = sub i8 0, %amt
@@ -246,11 +509,18 @@ define void @rotate_right_m8(i8* %p, i32
 }
 
 define void @rotate_left_m16(i16* %p, i32 %amount) {
-; CHECK-LABEL: rotate_left_m16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rolw %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_left_m16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rolw %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_left_m16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rolw %cl, (%rdi)
+; X64-NEXT:    retq
   %x = load i16, i16* %p, align 1
   %amt = trunc i32 %amount to i16
   %sub = sub i16 0, %amt
@@ -264,11 +534,18 @@ define void @rotate_left_m16(i16* %p, i3
 }
 
 define void @rotate_right_m16(i16* %p, i32 %amount) {
-; CHECK-LABEL: rotate_right_m16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    rorw %cl, (%rdi)
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_right_m16:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorw %cl, (%eax)
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_right_m16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    rorw %cl, (%rdi)
+; X64-NEXT:    retq
   %x = load i16, i16* %p, align 1
   %amt = trunc i32 %amount to i16
   %sub = sub i16 0, %amt
@@ -282,13 +559,21 @@ define void @rotate_right_m16(i16* %p, i
 }
 
 define i32 @rotate_demanded_bits(i32, i32) {
-; CHECK-LABEL: rotate_demanded_bits:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andb $30, %sil
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    roll %cl, %edi
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_demanded_bits:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    andb $30, %cl
+; X86-NEXT:    roll %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_demanded_bits:
+; X64:       # %bb.0:
+; X64-NEXT:    andb $30, %sil
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %3 = and i32 %1, 30
   %4 = shl i32 %0, %3
   %5 = sub nsw i32 0, %3
@@ -299,13 +584,21 @@ define i32 @rotate_demanded_bits(i32, i3
 }
 
 define i32 @rotate_demanded_bits_2(i32, i32) {
-; CHECK-LABEL: rotate_demanded_bits_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andb $23, %sil
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    roll %cl, %edi
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_demanded_bits_2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    andb $23, %cl
+; X86-NEXT:    roll %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_demanded_bits_2:
+; X64:       # %bb.0:
+; X64-NEXT:    andb $23, %sil
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %3 = and i32 %1, 23
   %4 = shl i32 %0, %3
   %5 = sub nsw i32 0, %3
@@ -316,14 +609,23 @@ define i32 @rotate_demanded_bits_2(i32,
 }
 
 define i32 @rotate_demanded_bits_3(i32, i32) {
-; CHECK-LABEL: rotate_demanded_bits_3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addb %sil, %sil
-; CHECK-NEXT:    andb $30, %sil
-; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    roll %cl, %edi
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: rotate_demanded_bits_3:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    addb %cl, %cl
+; X86-NEXT:    andb $30, %cl
+; X86-NEXT:    roll %cl, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: rotate_demanded_bits_3:
+; X64:       # %bb.0:
+; X64-NEXT:    addb %sil, %sil
+; X64-NEXT:    andb $30, %sil
+; X64-NEXT:    movl %esi, %ecx
+; X64-NEXT:    roll %cl, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
   %3 = shl i32 %1, 1
   %4 = and i32 %3, 30
   %5 = shl i32 %0, %4



