[llvm] r356120 - [X86] Add various test cases for PR41057. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Thu Mar 14 00:07:25 PDT 2019


Author: ctopper
Date: Thu Mar 14 00:07:24 2019
New Revision: 356120

URL: http://llvm.org/viewvc/llvm-project?rev=356120&view=rev
Log:
[X86] Add various test cases for PR41057. NFC

Modified:
    llvm/trunk/test/CodeGen/X86/funnel-shift-rot.ll
    llvm/trunk/test/CodeGen/X86/rot32.ll
    llvm/trunk/test/CodeGen/X86/rot64.ll
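
[Context note, not part of the commit message: the new tests call the funnel-shift
intrinsics with both value operands equal, which is the canonical way to express a
rotate in IR, using constant shift amounts of 1 and width-1. A minimal standalone
sketch of that pattern, assuming only the standard llvm.fshl/llvm.fshr declarations
(the function names below are made up for illustration), looks like this:

    declare i8 @llvm.fshl.i8(i8, i8, i8)
    declare i8 @llvm.fshr.i8(i8, i8, i8)

    ; With identical value operands, fshl/fshr degenerate to rotl/rotr,
    ; so a constant amount of 1 (or bitwidth-1) is a rotate by one bit.
    define i8 @rotl_by_1(i8 %x) nounwind {
      %r = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 1)
      ret i8 %r
    }

    define i8 @rotr_by_1(i8 %x) nounwind {
      %r = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 1)
      ret i8 %r
    }

The CHECK lines in the diff below record the current llc output for such patterns;
they are typically regenerated with utils/update_llc_test_checks.py.]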

Modified: llvm/trunk/test/CodeGen/X86/funnel-shift-rot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/funnel-shift-rot.ll?rev=356120&r1=356119&r2=356120&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/funnel-shift-rot.ll (original)
+++ llvm/trunk/test/CodeGen/X86/funnel-shift-rot.ll Thu Mar 14 00:07:24 2019
@@ -33,6 +33,40 @@ define i8 @rotl_i8_const_shift(i8 %x) no
   ret i8 %f
 }
 
+define i8 @rotl_i8_const_shift1(i8 %x) nounwind {
+; X32-SSE2-LABEL: rotl_i8_const_shift1:
+; X32-SSE2:       # %bb.0:
+; X32-SSE2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-SSE2-NEXT:    rolb %al
+; X32-SSE2-NEXT:    retl
+;
+; X64-AVX2-LABEL: rotl_i8_const_shift1:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    movl %edi, %eax
+; X64-AVX2-NEXT:    rolb %al
+; X64-AVX2-NEXT:    # kill: def $al killed $al killed $eax
+; X64-AVX2-NEXT:    retq
+  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 1)
+  ret i8 %f
+}
+
+define i8 @rotl_i8_const_shift7(i8 %x) nounwind {
+; X32-SSE2-LABEL: rotl_i8_const_shift7:
+; X32-SSE2:       # %bb.0:
+; X32-SSE2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-SSE2-NEXT:    rorb %al
+; X32-SSE2-NEXT:    retl
+;
+; X64-AVX2-LABEL: rotl_i8_const_shift7:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    movl %edi, %eax
+; X64-AVX2-NEXT:    rorb %al
+; X64-AVX2-NEXT:    # kill: def $al killed $al killed $eax
+; X64-AVX2-NEXT:    retq
+  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 7)
+  ret i8 %f
+}
+
 define i64 @rotl_i64_const_shift(i64 %x) nounwind {
 ; X32-SSE2-LABEL: rotl_i64_const_shift:
 ; X32-SSE2:       # %bb.0:
@@ -167,6 +201,40 @@ define i8 @rotr_i8_const_shift(i8 %x) no
   ret i8 %f
 }
 
+define i8 @rotr_i8_const_shift1(i8 %x) nounwind {
+; X32-SSE2-LABEL: rotr_i8_const_shift1:
+; X32-SSE2:       # %bb.0:
+; X32-SSE2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-SSE2-NEXT:    rorb $1, %al
+; X32-SSE2-NEXT:    retl
+;
+; X64-AVX2-LABEL: rotr_i8_const_shift1:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    movl %edi, %eax
+; X64-AVX2-NEXT:    rorb $1, %al
+; X64-AVX2-NEXT:    # kill: def $al killed $al killed $eax
+; X64-AVX2-NEXT:    retq
+  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 1)
+  ret i8 %f
+}
+
+define i8 @rotr_i8_const_shift7(i8 %x) nounwind {
+; X32-SSE2-LABEL: rotr_i8_const_shift7:
+; X32-SSE2:       # %bb.0:
+; X32-SSE2-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-SSE2-NEXT:    rorb $7, %al
+; X32-SSE2-NEXT:    retl
+;
+; X64-AVX2-LABEL: rotr_i8_const_shift7:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    movl %edi, %eax
+; X64-AVX2-NEXT:    rorb $7, %al
+; X64-AVX2-NEXT:    # kill: def $al killed $al killed $eax
+; X64-AVX2-NEXT:    retq
+  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 7)
+  ret i8 %f
+}
+
 define i32 @rotr_i32_const_shift(i32 %x) nounwind {
 ; X32-SSE2-LABEL: rotr_i32_const_shift:
 ; X32-SSE2:       # %bb.0:

Modified: llvm/trunk/test/CodeGen/X86/rot32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rot32.ll?rev=356120&r1=356119&r2=356120&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rot32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rot32.ll Thu Mar 14 00:07:24 2019
@@ -313,3 +313,227 @@ entry:
 	%2 = or i32 %0, %1
 	ret i32 %2
 }
+
+define i32 @fshl(i32 %x) nounwind {
+; X86-LABEL: fshl:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll $7, %eax
+; X86-NEXT:    retl
+;
+; SHLD-LABEL: fshl:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SHLD-NEXT:    shldl $7, %eax, %eax
+; SHLD-NEXT:    retl
+;
+; BMI2-LABEL: fshl:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxl $25, {{[0-9]+}}(%esp), %eax
+; BMI2-NEXT:    retl
+;
+; X64-LABEL: fshl:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    roll $7, %eax
+; X64-NEXT:    retq
+;
+; SHLD64-LABEL: fshl:
+; SHLD64:       # %bb.0:
+; SHLD64-NEXT:    movl %edi, %eax
+; SHLD64-NEXT:    shldl $7, %edi, %eax
+; SHLD64-NEXT:    retq
+;
+; BMI264-LABEL: fshl:
+; BMI264:       # %bb.0:
+; BMI264-NEXT:    rorxl $25, %edi, %eax
+; BMI264-NEXT:    retq
+  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7)
+  ret i32 %f
+}
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @fshl1(i32 %x) nounwind {
+; X86-LABEL: fshl1:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    roll %eax
+; X86-NEXT:    retl
+;
+; SHLD-LABEL: fshl1:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SHLD-NEXT:    shldl $1, %eax, %eax
+; SHLD-NEXT:    retl
+;
+; BMI2-LABEL: fshl1:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxl $31, {{[0-9]+}}(%esp), %eax
+; BMI2-NEXT:    retl
+;
+; X64-LABEL: fshl1:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    roll %eax
+; X64-NEXT:    retq
+;
+; SHLD64-LABEL: fshl1:
+; SHLD64:       # %bb.0:
+; SHLD64-NEXT:    movl %edi, %eax
+; SHLD64-NEXT:    shldl $1, %edi, %eax
+; SHLD64-NEXT:    retq
+;
+; BMI264-LABEL: fshl1:
+; BMI264:       # %bb.0:
+; BMI264-NEXT:    rorxl $31, %edi, %eax
+; BMI264-NEXT:    retq
+  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 1)
+  ret i32 %f
+}
+
+define i32 @fshl31(i32 %x) nounwind {
+; X86-LABEL: fshl31:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    rorl %eax
+; X86-NEXT:    retl
+;
+; SHLD-LABEL: fshl31:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SHLD-NEXT:    shldl $31, %eax, %eax
+; SHLD-NEXT:    retl
+;
+; BMI2-LABEL: fshl31:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxl $1, {{[0-9]+}}(%esp), %eax
+; BMI2-NEXT:    retl
+;
+; X64-LABEL: fshl31:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    rorl %eax
+; X64-NEXT:    retq
+;
+; SHLD64-LABEL: fshl31:
+; SHLD64:       # %bb.0:
+; SHLD64-NEXT:    movl %edi, %eax
+; SHLD64-NEXT:    shldl $31, %edi, %eax
+; SHLD64-NEXT:    retq
+;
+; BMI264-LABEL: fshl31:
+; BMI264:       # %bb.0:
+; BMI264-NEXT:    rorxl $1, %edi, %eax
+; BMI264-NEXT:    retq
+  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 31)
+  ret i32 %f
+}
+
+define i32 @fshl_load(i32* %p) nounwind {
+; X86-LABEL: fshl_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    roll $7, %eax
+; X86-NEXT:    retl
+;
+; SHLD-LABEL: fshl_load:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SHLD-NEXT:    movl (%eax), %eax
+; SHLD-NEXT:    shldl $7, %eax, %eax
+; SHLD-NEXT:    retl
+;
+; BMI2-LABEL: fshl_load:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; BMI2-NEXT:    rorxl $25, (%eax), %eax
+; BMI2-NEXT:    retl
+;
+; X64-LABEL: fshl_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    roll $7, %eax
+; X64-NEXT:    retq
+;
+; SHLD64-LABEL: fshl_load:
+; SHLD64:       # %bb.0:
+; SHLD64-NEXT:    movl (%rdi), %eax
+; SHLD64-NEXT:    shldl $7, %eax, %eax
+; SHLD64-NEXT:    retq
+;
+; BMI264-LABEL: fshl_load:
+; BMI264:       # %bb.0:
+; BMI264-NEXT:    rorxl $25, (%rdi), %eax
+; BMI264-NEXT:    retq
+  %x = load i32, i32* %p
+  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7)
+  ret i32 %f
+}
+
+define i32 @fshr(i32 %x) nounwind {
+; CHECK32-LABEL: fshr:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    rorl $7, %eax
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: fshr:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    movl %edi, %eax
+; CHECK64-NEXT:    rorl $7, %eax
+; CHECK64-NEXT:    retq
+  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 7)
+  ret i32 %f
+}
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @fshr1(i32 %x) nounwind {
+; CHECK32-LABEL: fshr1:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    rorl $1, %eax
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: fshr1:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    movl %edi, %eax
+; CHECK64-NEXT:    rorl $1, %eax
+; CHECK64-NEXT:    retq
+  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 1)
+  ret i32 %f
+}
+
+define i32 @fshr31(i32 %x) nounwind {
+; CHECK32-LABEL: fshr31:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    rorl $31, %eax
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: fshr31:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    movl %edi, %eax
+; CHECK64-NEXT:    rorl $31, %eax
+; CHECK64-NEXT:    retq
+  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 31)
+  ret i32 %f
+}
+
+define i32 @fshr_load(i32* %p) nounwind {
+; CHECK32-LABEL: fshr_load:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movl (%eax), %eax
+; CHECK32-NEXT:    rorl $7, %eax
+; CHECK32-NEXT:    retl
+;
+; CHECK64-LABEL: fshr_load:
+; CHECK64:       # %bb.0:
+; CHECK64-NEXT:    movl (%rdi), %eax
+; CHECK64-NEXT:    rorl $7, %eax
+; CHECK64-NEXT:    retq
+  %x = load i32, i32* %p
+  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 7)
+  ret i32 %f
+}

Modified: llvm/trunk/test/CodeGen/X86/rot64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rot64.ll?rev=356120&r1=356119&r2=356120&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rot64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/rot64.ll Thu Mar 14 00:07:24 2019
@@ -190,3 +190,131 @@ entry:
 	%2 = or i64 %0, %1
 	ret i64 %2
 }
+
+define i64 @fshl(i64 %x) nounwind {
+; X64-LABEL: fshl:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    rolq $7, %rax
+; X64-NEXT:    retq
+;
+; SHLD-LABEL: fshl:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movq %rdi, %rax
+; SHLD-NEXT:    shldq $7, %rdi, %rax
+; SHLD-NEXT:    retq
+;
+; BMI2-LABEL: fshl:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxq $57, %rdi, %rax
+; BMI2-NEXT:    retq
+  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 7)
+  ret i64 %f
+}
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+
+define i64 @fshl1(i64 %x) nounwind {
+; X64-LABEL: fshl1:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    rolq %rax
+; X64-NEXT:    retq
+;
+; SHLD-LABEL: fshl1:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movq %rdi, %rax
+; SHLD-NEXT:    shldq $1, %rdi, %rax
+; SHLD-NEXT:    retq
+;
+; BMI2-LABEL: fshl1:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxq $63, %rdi, %rax
+; BMI2-NEXT:    retq
+  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 1)
+  ret i64 %f
+}
+
+define i64 @fshl63(i64 %x) nounwind {
+; X64-LABEL: fshl63:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    rorq %rax
+; X64-NEXT:    retq
+;
+; SHLD-LABEL: fshl63:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movq %rdi, %rax
+; SHLD-NEXT:    shldq $63, %rdi, %rax
+; SHLD-NEXT:    retq
+;
+; BMI2-LABEL: fshl63:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxq $1, %rdi, %rax
+; BMI2-NEXT:    retq
+  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 63)
+  ret i64 %f
+}
+
+define i64 @fshl_load(i64* %p) nounwind {
+; X64-LABEL: fshl_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    rolq $7, %rax
+; X64-NEXT:    retq
+;
+; SHLD-LABEL: fshl_load:
+; SHLD:       # %bb.0:
+; SHLD-NEXT:    movq (%rdi), %rax
+; SHLD-NEXT:    shldq $7, %rax, %rax
+; SHLD-NEXT:    retq
+;
+; BMI2-LABEL: fshl_load:
+; BMI2:       # %bb.0:
+; BMI2-NEXT:    rorxq $57, (%rdi), %rax
+; BMI2-NEXT:    retq
+  %x = load i64, i64* %p
+  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 7)
+  ret i64 %f
+}
+
+define i64 @fshr(i64 %x) nounwind {
+; ALL-LABEL: fshr:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq %rdi, %rax
+; ALL-NEXT:    rorq $7, %rax
+; ALL-NEXT:    retq
+  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 7)
+  ret i64 %f
+}
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define i64 @fshr1(i64 %x) nounwind {
+; ALL-LABEL: fshr1:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq %rdi, %rax
+; ALL-NEXT:    rorq $1, %rax
+; ALL-NEXT:    retq
+  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 1)
+  ret i64 %f
+}
+
+define i64 @fshr63(i64 %x) nounwind {
+; ALL-LABEL: fshr63:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq %rdi, %rax
+; ALL-NEXT:    rorq $63, %rax
+; ALL-NEXT:    retq
+  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 63)
+  ret i64 %f
+}
+
+define i64 @fshr_load(i64* %p) nounwind {
+; ALL-LABEL: fshr_load:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    rorq $7, %rax
+; ALL-NEXT:    retq
+  %x = load i64, i64* %p
+  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 7)
+  ret i64 %f
+}
