[llvm] 249a7ed - [x86] add tests for bitwise logic of funnel shifts; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 5 07:45:48 PDT 2022


Author: Filipp Zhinkin
Date: 2022-08-05T10:45:43-04:00
New Revision: 249a7ed750729b79341fb22003a0f4537883d928

URL: https://github.com/llvm/llvm-project/commit/249a7ed750729b79341fb22003a0f4537883d928
DIFF: https://github.com/llvm/llvm-project/commit/249a7ed750729b79341fb22003a0f4537883d928.diff

LOG: [x86] add tests for bitwise logic of funnel shifts; NFC

Baseline tests for D130994

Added: 
    llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll b/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll
new file mode 100644
index 0000000000000..b20cca63aa805
--- /dev/null
+++ b/llvm/test/CodeGen/X86/funnel-shift-logic-fold.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+declare i64 @llvm.fshl.i64(i64, i64, i64) nounwind readnone
+declare i64 @llvm.fshr.i64(i64, i64, i64) nounwind readnone
+
+define i64 @hoist_fshl_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_or:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shldq %cl, %rsi, %rdi
+; X64-NEXT:    shldq %cl, %rax, %rdx
+; X64-NEXT:    orq %rdi, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    retq
+  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+  %res = or i64 %fshl.0, %fshl.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshl_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_and:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shldq %cl, %rsi, %rdi
+; X64-NEXT:    shldq %cl, %rax, %rdx
+; X64-NEXT:    andq %rdi, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    retq
+  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+  %res = and i64 %fshl.0, %fshl.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshl_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshl_from_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shldq %cl, %rsi, %rdi
+; X64-NEXT:    shldq %cl, %rax, %rdx
+; X64-NEXT:    xorq %rdi, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    retq
+  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %s)
+  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 %s)
+  %res = xor i64 %fshl.0, %fshl.1
+  ret i64 %res
+}
+
+define i64 @fshl_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: fshl_or_with_different_shift_value:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shldq $12, %rsi, %rdi
+; X64-NEXT:    shldq $13, %rcx, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 12)
+  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 13)
+  %res = or i64 %fshl.0, %fshl.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshl_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: hoist_fshl_from_or_const_shift:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shldq $15, %rsi, %rdi
+; X64-NEXT:    shldq $15, %rcx, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %fshl.0 = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 15)
+  %fshl.1 = call i64 @llvm.fshl.i64(i64 %c, i64 %d, i64 15)
+  %res = or i64 %fshl.0, %fshl.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshr_from_or(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_or:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shrdq %cl, %rdi, %rsi
+; X64-NEXT:    shrdq %cl, %rdx, %rax
+; X64-NEXT:    orq %rsi, %rax
+; X64-NEXT:    retq
+  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+  %res = or i64 %fshr.0, %fshr.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshr_from_and(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_and:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shrdq %cl, %rdi, %rsi
+; X64-NEXT:    shrdq %cl, %rdx, %rax
+; X64-NEXT:    andq %rsi, %rax
+; X64-NEXT:    retq
+  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+  %res = and i64 %fshr.0, %fshr.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshr_from_xor(i64 %a, i64 %b, i64 %c, i64 %d, i64 %s) nounwind {
+; X64-LABEL: hoist_fshr_from_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    movl %r8d, %ecx
+; X64-NEXT:    shrdq %cl, %rdi, %rsi
+; X64-NEXT:    shrdq %cl, %rdx, %rax
+; X64-NEXT:    xorq %rsi, %rax
+; X64-NEXT:    retq
+  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %s)
+  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 %s)
+  %res = xor i64 %fshr.0, %fshr.1
+  ret i64 %res
+}
+
+define i64 @fshr_or_with_different_shift_value(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: fshr_or_with_different_shift_value:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shldq $52, %rsi, %rdi
+; X64-NEXT:    shldq $51, %rcx, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 12)
+  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 13)
+  %res = or i64 %fshr.0, %fshr.1
+  ret i64 %res
+}
+
+define i64 @hoist_fshr_from_or_const_shift(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
+; X64-LABEL: hoist_fshr_from_or_const_shift:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shldq $49, %rsi, %rdi
+; X64-NEXT:    shldq $49, %rcx, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %fshr.0 = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 15)
+  %fshr.1 = call i64 @llvm.fshr.i64(i64 %c, i64 %d, i64 15)
+  %res = or i64 %fshr.0, %fshr.1
+  ret i64 %res
+}


        


More information about the llvm-commits mailing list