[llvm] r283695 - [X86] Improve the rotate ISel test

Zvi Rackover via llvm-commits <llvm-commits at lists.llvm.org>
Sun Oct 9 06:07:26 PDT 2016


Author: zvi
Date: Sun Oct  9 08:07:25 2016
New Revision: 283695

URL: http://llvm.org/viewvc/llvm-project?rev=283695&view=rev
Log:
[X86] Improve the rotate ISel test

Summary:
- Added 64-bit target testing.
- Added 64-bit operand test cases.
- Added cases that demonstrate PR30644 (see the sketch below).

Reviewers: RKSimon, craig.topper, igorb

Differential Revision: https://reviews.llvm.org/D25401
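
For context, every function in this test spells a rotate as the usual shl/lshr/or idiom and expects ISel to match it into a single rotate instruction. A minimal sketch of the rotate-by-one case behind PR30644 (hypothetical function name; the committed tests cover all of i8/i16/i32/i64):

    ; Rotate-right-by-one written as two shifts and an or. The compact x86-64
    ; lowering would be a single rorq; as the rotr1_64 checks in the diff show,
    ; ISel currently emits the equivalent rolq $63 instead, which is the
    ; behavior PR30644 tracks.
    define i64 @rotr1_sketch(i64 %x) {
      %lo = lshr i64 %x, 1
      %hi = shl i64 %x, 63
      %r = or i64 %lo, %hi
      ret i64 %r
    }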

Modified:
    llvm/trunk/test/CodeGen/X86/rotate.ll

Modified: llvm/trunk/test/CodeGen/X86/rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate.ll?rev=283695&r1=283694&r2=283695&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rotate.ll Sun Oct  9 08:07:25 2016
@@ -1,100 +1,567 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN:   grep "ro[rl]" | count 12
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-linux | FileCheck %s --check-prefix=32
+; RUN: llc < %s -mtriple=x86_64-unknown-linux | FileCheck %s --check-prefix=64
+
+define i64 @rotl64(i64 %A, i8 %Amt) {
+; 32-LABEL: rotl64:
+; 32:       # BB#0:
+; 32-NEXT:    pushl %ebx
+; 32-NEXT:  .Ltmp0:
+; 32-NEXT:    .cfi_def_cfa_offset 8
+; 32-NEXT:    pushl %edi
+; 32-NEXT:  .Ltmp1:
+; 32-NEXT:    .cfi_def_cfa_offset 12
+; 32-NEXT:    pushl %esi
+; 32-NEXT:  .Ltmp2:
+; 32-NEXT:    .cfi_def_cfa_offset 16
+; 32-NEXT:  .Ltmp3:
+; 32-NEXT:    .cfi_offset %esi, -16
+; 32-NEXT:  .Ltmp4:
+; 32-NEXT:    .cfi_offset %edi, -12
+; 32-NEXT:  .Ltmp5:
+; 32-NEXT:    .cfi_offset %ebx, -8
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; 32-NEXT:    movl %esi, %eax
+; 32-NEXT:    shll %cl, %eax
+; 32-NEXT:    movl %edi, %edx
+; 32-NEXT:    shldl %cl, %esi, %edx
+; 32-NEXT:    testb $32, %cl
+; 32-NEXT:    je .LBB0_2
+; 32-NEXT:  # BB#1:
+; 32-NEXT:    movl %eax, %edx
+; 32-NEXT:    xorl %eax, %eax
+; 32-NEXT:  .LBB0_2:
+; 32-NEXT:    movb $64, %ch
+; 32-NEXT:    subb %cl, %ch
+; 32-NEXT:    movl %edi, %ebx
+; 32-NEXT:    movb %ch, %cl
+; 32-NEXT:    shrl %cl, %ebx
+; 32-NEXT:    shrdl %cl, %edi, %esi
+; 32-NEXT:    testb $32, %ch
+; 32-NEXT:    je .LBB0_4
+; 32-NEXT:  # BB#3:
+; 32-NEXT:    movl %ebx, %esi
+; 32-NEXT:    xorl %ebx, %ebx
+; 32-NEXT:  .LBB0_4:
+; 32-NEXT:    orl %esi, %eax
+; 32-NEXT:    orl %ebx, %edx
+; 32-NEXT:    popl %esi
+; 32-NEXT:    popl %edi
+; 32-NEXT:    popl %ebx
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl64:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rolq %cl, %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%shift.upgrd.1 = zext i8 %Amt to i64
+	%B = shl i64 %A, %shift.upgrd.1
+	%Amt2 = sub i8 64, %Amt
+	%shift.upgrd.2 = zext i8 %Amt2 to i64
+	%C = lshr i64 %A, %shift.upgrd.2
+	%D = or i64 %B, %C
+	ret i64 %D
+}
+
+define i64 @rotr64(i64 %A, i8 %Amt) {
+; 32-LABEL: rotr64:
+; 32:       # BB#0:
+; 32-NEXT:    pushl %ebx
+; 32-NEXT:  .Ltmp6:
+; 32-NEXT:    .cfi_def_cfa_offset 8
+; 32-NEXT:    pushl %edi
+; 32-NEXT:  .Ltmp7:
+; 32-NEXT:    .cfi_def_cfa_offset 12
+; 32-NEXT:    pushl %esi
+; 32-NEXT:  .Ltmp8:
+; 32-NEXT:    .cfi_def_cfa_offset 16
+; 32-NEXT:  .Ltmp9:
+; 32-NEXT:    .cfi_offset %esi, -16
+; 32-NEXT:  .Ltmp10:
+; 32-NEXT:    .cfi_offset %edi, -12
+; 32-NEXT:  .Ltmp11:
+; 32-NEXT:    .cfi_offset %ebx, -8
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; 32-NEXT:    movl %esi, %edx
+; 32-NEXT:    shrl %cl, %edx
+; 32-NEXT:    movl %edi, %eax
+; 32-NEXT:    shrdl %cl, %esi, %eax
+; 32-NEXT:    testb $32, %cl
+; 32-NEXT:    je .LBB1_2
+; 32-NEXT:  # BB#1:
+; 32-NEXT:    movl %edx, %eax
+; 32-NEXT:    xorl %edx, %edx
+; 32-NEXT:  .LBB1_2:
+; 32-NEXT:    movb $64, %ch
+; 32-NEXT:    subb %cl, %ch
+; 32-NEXT:    movl %edi, %ebx
+; 32-NEXT:    movb %ch, %cl
+; 32-NEXT:    shll %cl, %ebx
+; 32-NEXT:    shldl %cl, %edi, %esi
+; 32-NEXT:    testb $32, %ch
+; 32-NEXT:    je .LBB1_4
+; 32-NEXT:  # BB#3:
+; 32-NEXT:    movl %ebx, %esi
+; 32-NEXT:    xorl %ebx, %ebx
+; 32-NEXT:  .LBB1_4:
+; 32-NEXT:    orl %ebx, %eax
+; 32-NEXT:    orl %esi, %edx
+; 32-NEXT:    popl %esi
+; 32-NEXT:    popl %edi
+; 32-NEXT:    popl %ebx
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr64:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rorq %cl, %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%shift.upgrd.3 = zext i8 %Amt to i64
+	%B = lshr i64 %A, %shift.upgrd.3
+	%Amt2 = sub i8 64, %Amt
+	%shift.upgrd.4 = zext i8 %Amt2 to i64
+	%C = shl i64 %A, %shift.upgrd.4
+	%D = or i64 %B, %C
+	ret i64 %D
+}
+
+define i64 @rotli64(i64 %A) {
+; 32-LABEL: rotli64:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; 32-NEXT:    movl %ecx, %edx
+; 32-NEXT:    shldl $5, %eax, %edx
+; 32-NEXT:    shldl $5, %ecx, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotli64:
+; 64:       # BB#0:
+; 64-NEXT:    rolq $5, %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%B = shl i64 %A, 5
+	%C = lshr i64 %A, 59
+	%D = or i64 %B, %C
+	ret i64 %D
+}
+
+define i64 @rotri64(i64 %A) {
+; 32-LABEL: rotri64:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; 32-NEXT:    movl %ecx, %eax
+; 32-NEXT:    shldl $27, %edx, %eax
+; 32-NEXT:    shldl $27, %ecx, %edx
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotri64:
+; 64:       # BB#0:
+; 64-NEXT:    rolq $59, %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%B = lshr i64 %A, 5
+	%C = shl i64 %A, 59
+	%D = or i64 %B, %C
+	ret i64 %D
+}
+
+define i64 @rotl1_64(i64 %A) {
+; 32-LABEL: rotl1_64:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; 32-NEXT:    movl %ecx, %edx
+; 32-NEXT:    shldl $1, %eax, %edx
+; 32-NEXT:    shldl $1, %ecx, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl1_64:
+; 64:       # BB#0:
+; 64-NEXT:    rolq %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%B = shl i64 %A, 1
+	%C = lshr i64 %A, 63
+	%D = or i64 %B, %C
+	ret i64 %D
+}
+
+define i64 @rotr1_64(i64 %A) {
+; 32-LABEL: rotr1_64:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; 32-NEXT:    movl %ecx, %eax
+; 32-NEXT:    shldl $31, %edx, %eax
+; 32-NEXT:    shldl $31, %ecx, %edx
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_64:
+; 64:       # BB#0:
+; 64-NEXT:    rolq $63, %rdi
+; 64-NEXT:    movq %rdi, %rax
+; 64-NEXT:    retq
+	%B = shl i64 %A, 63
+	%C = lshr i64 %A, 1
+	%D = or i64 %B, %C
+	ret i64 %D
+}
 
 define i32 @rotl32(i32 %A, i8 %Amt) {
-	%shift.upgrd.1 = zext i8 %Amt to i32		; <i32> [#uses=1]
-	%B = shl i32 %A, %shift.upgrd.1		; <i32> [#uses=1]
-	%Amt2 = sub i8 32, %Amt		; <i8> [#uses=1]
-	%shift.upgrd.2 = zext i8 %Amt2 to i32		; <i32> [#uses=1]
-	%C = lshr i32 %A, %shift.upgrd.2		; <i32> [#uses=1]
-	%D = or i32 %B, %C		; <i32> [#uses=1]
+; 32-LABEL: rotl32:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    roll %cl, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl32:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    roll %cl, %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%shift.upgrd.1 = zext i8 %Amt to i32
+	%B = shl i32 %A, %shift.upgrd.1
+	%Amt2 = sub i8 32, %Amt
+	%shift.upgrd.2 = zext i8 %Amt2 to i32
+	%C = lshr i32 %A, %shift.upgrd.2
+	%D = or i32 %B, %C
 	ret i32 %D
 }
 
 define i32 @rotr32(i32 %A, i8 %Amt) {
-	%shift.upgrd.3 = zext i8 %Amt to i32		; <i32> [#uses=1]
-	%B = lshr i32 %A, %shift.upgrd.3		; <i32> [#uses=1]
-	%Amt2 = sub i8 32, %Amt		; <i8> [#uses=1]
-	%shift.upgrd.4 = zext i8 %Amt2 to i32		; <i32> [#uses=1]
-	%C = shl i32 %A, %shift.upgrd.4		; <i32> [#uses=1]
-	%D = or i32 %B, %C		; <i32> [#uses=1]
+; 32-LABEL: rotr32:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rorl %cl, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr32:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rorl %cl, %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%shift.upgrd.3 = zext i8 %Amt to i32
+	%B = lshr i32 %A, %shift.upgrd.3
+	%Amt2 = sub i8 32, %Amt
+	%shift.upgrd.4 = zext i8 %Amt2 to i32
+	%C = shl i32 %A, %shift.upgrd.4
+	%D = or i32 %B, %C
 	ret i32 %D
 }
 
 define i32 @rotli32(i32 %A) {
-	%B = shl i32 %A, 5		; <i32> [#uses=1]
-	%C = lshr i32 %A, 27		; <i32> [#uses=1]
-	%D = or i32 %B, %C		; <i32> [#uses=1]
+; 32-LABEL: rotli32:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    roll $5, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotli32:
+; 64:       # BB#0:
+; 64-NEXT:    roll $5, %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i32 %A, 5
+	%C = lshr i32 %A, 27
+	%D = or i32 %B, %C
 	ret i32 %D
 }
 
 define i32 @rotri32(i32 %A) {
-	%B = lshr i32 %A, 5		; <i32> [#uses=1]
-	%C = shl i32 %A, 27		; <i32> [#uses=1]
-	%D = or i32 %B, %C		; <i32> [#uses=1]
+; 32-LABEL: rotri32:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    roll $27, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotri32:
+; 64:       # BB#0:
+; 64-NEXT:    roll $27, %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i32 %A, 5
+	%C = shl i32 %A, 27
+	%D = or i32 %B, %C
+	ret i32 %D
+}
+
+define i32 @rotl1_32(i32 %A) {
+; 32-LABEL: rotl1_32:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    roll %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl1_32:
+; 64:       # BB#0:
+; 64-NEXT:    roll %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i32 %A, 1
+	%C = lshr i32 %A, 31
+	%D = or i32 %B, %C
+	ret i32 %D
+}
+
+define i32 @rotr1_32(i32 %A) {
+; 32-LABEL: rotr1_32:
+; 32:       # BB#0:
+; 32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    roll $31, %eax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_32:
+; 64:       # BB#0:
+; 64-NEXT:    roll $31, %edi
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i32 %A, 31
+	%C = lshr i32 %A, 1
+	%D = or i32 %B, %C
 	ret i32 %D
 }
 
 define i16 @rotl16(i16 %A, i8 %Amt) {
-	%shift.upgrd.5 = zext i8 %Amt to i16		; <i16> [#uses=1]
-	%B = shl i16 %A, %shift.upgrd.5		; <i16> [#uses=1]
-	%Amt2 = sub i8 16, %Amt		; <i8> [#uses=1]
-	%shift.upgrd.6 = zext i8 %Amt2 to i16		; <i16> [#uses=1]
-	%C = lshr i16 %A, %shift.upgrd.6		; <i16> [#uses=1]
-	%D = or i16 %B, %C		; <i16> [#uses=1]
+; 32-LABEL: rotl16:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rolw %cl, %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl16:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rolw %cl, %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%shift.upgrd.5 = zext i8 %Amt to i16
+	%B = shl i16 %A, %shift.upgrd.5
+	%Amt2 = sub i8 16, %Amt
+	%shift.upgrd.6 = zext i8 %Amt2 to i16
+	%C = lshr i16 %A, %shift.upgrd.6
+	%D = or i16 %B, %C
 	ret i16 %D
 }
 
 define i16 @rotr16(i16 %A, i8 %Amt) {
-	%shift.upgrd.7 = zext i8 %Amt to i16		; <i16> [#uses=1]
-	%B = lshr i16 %A, %shift.upgrd.7		; <i16> [#uses=1]
-	%Amt2 = sub i8 16, %Amt		; <i8> [#uses=1]
-	%shift.upgrd.8 = zext i8 %Amt2 to i16		; <i16> [#uses=1]
-	%C = shl i16 %A, %shift.upgrd.8		; <i16> [#uses=1]
-	%D = or i16 %B, %C		; <i16> [#uses=1]
+; 32-LABEL: rotr16:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rorw %cl, %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr16:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rorw %cl, %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%shift.upgrd.7 = zext i8 %Amt to i16
+	%B = lshr i16 %A, %shift.upgrd.7
+	%Amt2 = sub i8 16, %Amt
+	%shift.upgrd.8 = zext i8 %Amt2 to i16
+	%C = shl i16 %A, %shift.upgrd.8
+	%D = or i16 %B, %C
 	ret i16 %D
 }
 
 define i16 @rotli16(i16 %A) {
-	%B = shl i16 %A, 5		; <i16> [#uses=1]
-	%C = lshr i16 %A, 11		; <i16> [#uses=1]
-	%D = or i16 %B, %C		; <i16> [#uses=1]
+; 32-LABEL: rotli16:
+; 32:       # BB#0:
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rolw $5, %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotli16:
+; 64:       # BB#0:
+; 64-NEXT:    rolw $5, %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i16 %A, 5
+	%C = lshr i16 %A, 11
+	%D = or i16 %B, %C
 	ret i16 %D
 }
 
 define i16 @rotri16(i16 %A) {
-	%B = lshr i16 %A, 5		; <i16> [#uses=1]
-	%C = shl i16 %A, 11		; <i16> [#uses=1]
-	%D = or i16 %B, %C		; <i16> [#uses=1]
+; 32-LABEL: rotri16:
+; 32:       # BB#0:
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rolw $11, %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotri16:
+; 64:       # BB#0:
+; 64-NEXT:    rolw $11, %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i16 %A, 5
+	%C = shl i16 %A, 11
+	%D = or i16 %B, %C
+	ret i16 %D
+}
+
+define i16 @rotl1_16(i16 %A) {
+; 32-LABEL: rotl1_16:
+; 32:       # BB#0:
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rolw %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl1_16:
+; 64:       # BB#0:
+; 64-NEXT:    rolw %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i16 %A, 1
+	%C = lshr i16 %A, 15
+	%D = or i16 %B, %C
+	ret i16 %D
+}
+
+define i16 @rotr1_16(i16 %A) {
+; 32-LABEL: rotr1_16:
+; 32:       # BB#0:
+; 32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; 32-NEXT:    rolw $15, %ax
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_16:
+; 64:       # BB#0:
+; 64-NEXT:    rolw $15, %di
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i16 %A, 1
+	%C = shl i16 %A, 15
+	%D = or i16 %B, %C
 	ret i16 %D
 }
 
 define i8 @rotl8(i8 %A, i8 %Amt) {
-	%B = shl i8 %A, %Amt		; <i8> [#uses=1]
-	%Amt2 = sub i8 8, %Amt		; <i8> [#uses=1]
-	%C = lshr i8 %A, %Amt2		; <i8> [#uses=1]
-	%D = or i8 %B, %C		; <i8> [#uses=1]
+; 32-LABEL: rotl8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rolb %cl, %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl8:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rolb %cl, %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i8 %A, %Amt
+	%Amt2 = sub i8 8, %Amt
+	%C = lshr i8 %A, %Amt2
+	%D = or i8 %B, %C
 	ret i8 %D
 }
 
 define i8 @rotr8(i8 %A, i8 %Amt) {
-	%B = lshr i8 %A, %Amt		; <i8> [#uses=1]
-	%Amt2 = sub i8 8, %Amt		; <i8> [#uses=1]
-	%C = shl i8 %A, %Amt2		; <i8> [#uses=1]
-	%D = or i8 %B, %C		; <i8> [#uses=1]
+; 32-LABEL: rotr8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rorb %cl, %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr8:
+; 64:       # BB#0:
+; 64-NEXT:    movl %esi, %ecx
+; 64-NEXT:    rorb %cl, %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i8 %A, %Amt
+	%Amt2 = sub i8 8, %Amt
+	%C = shl i8 %A, %Amt2
+	%D = or i8 %B, %C
 	ret i8 %D
 }
 
 define i8 @rotli8(i8 %A) {
-	%B = shl i8 %A, 5		; <i8> [#uses=1]
-	%C = lshr i8 %A, 3		; <i8> [#uses=1]
-	%D = or i8 %B, %C		; <i8> [#uses=1]
+; 32-LABEL: rotli8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rolb $5, %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotli8:
+; 64:       # BB#0:
+; 64-NEXT:    rolb $5, %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i8 %A, 5
+	%C = lshr i8 %A, 3
+	%D = or i8 %B, %C
 	ret i8 %D
 }
 
 define i8 @rotri8(i8 %A) {
-	%B = lshr i8 %A, 5		; <i8> [#uses=1]
-	%C = shl i8 %A, 3		; <i8> [#uses=1]
-	%D = or i8 %B, %C		; <i8> [#uses=1]
+; 32-LABEL: rotri8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rolb $3, %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotri8:
+; 64:       # BB#0:
+; 64-NEXT:    rolb $3, %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i8 %A, 5
+	%C = shl i8 %A, 3
+	%D = or i8 %B, %C
+	ret i8 %D
+}
+
+define i8 @rotl1_8(i8 %A) {
+; 32-LABEL: rotl1_8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rolb %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotl1_8:
+; 64:       # BB#0:
+; 64-NEXT:    rolb %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = shl i8 %A, 1
+	%C = lshr i8 %A, 7
+	%D = or i8 %B, %C
+	ret i8 %D
+}
+
+define i8 @rotr1_8(i8 %A) {
+; 32-LABEL: rotr1_8:
+; 32:       # BB#0:
+; 32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; 32-NEXT:    rolb $7, %al
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_8:
+; 64:       # BB#0:
+; 64-NEXT:    rolb $7, %dil
+; 64-NEXT:    movl %edi, %eax
+; 64-NEXT:    retq
+	%B = lshr i8 %A, 1
+	%C = shl i8 %A, 7
+	%D = or i8 %B, %C
 	ret i8 %D
 }
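
As the NOTE at the top of the file says, the CHECK lines were generated by utils/update_llc_test_checks.py, so after a codegen change they can be regenerated rather than edited by hand. A typical invocation, assuming a freshly built llc is on PATH, would be:

    python utils/update_llc_test_checks.py test/CodeGen/X86/rotate.ll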