[llvm] 0f7ecba - [X86] Add test coverage for and/or/xor with freeze

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 14 05:15:09 PDT 2022


Author: Simon Pilgrim
Date: 2022-08-14T13:14:53+01:00
New Revision: 0f7ecba96a780450a16e62bffeef01587a25964c

URL: https://github.com/llvm/llvm-project/commit/0f7ecba96a780450a16e62bffeef01587a25964c
DIFF: https://github.com/llvm/llvm-project/commit/0f7ecba96a780450a16e62bffeef01587a25964c.diff

LOG: [X86] Add test coverage for and/or/xor with freeze

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/freeze-binary.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index 403423cd69f42..e6ea99f8fb2bf 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -2,6 +2,120 @@
 ; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=X86
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=X64
 
+define i32 @freeze_and(i32 %a0) nounwind {
+; X86-LABEL: freeze_and:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $15, %eax
+; X86-NEXT:    andl $7, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_and:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $15, %eax
+; X64-NEXT:    andl $7, %eax
+; X64-NEXT:    retq
+  %x = and i32 %a0, 15
+  %y = freeze i32 %x
+  %z = and i32 %y, 7
+  ret i32 %z
+}
+
+define <2 x i64> @freeze_and_vec(<2 x i64> %a0) nounwind {
+; X86-LABEL: freeze_and_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_and_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = and <2 x i64> %a0, <i64 15, i64 7>
+  %y = freeze <2 x i64> %x
+  %z = and <2 x i64> %y, <i64 7, i64 15>
+  ret <2 x i64> %z
+}
+
+define i32 @freeze_or(i32 %a0) nounwind {
+; X86-LABEL: freeze_or:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl $3, %eax
+; X86-NEXT:    orl $12, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_or:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl $3, %eax
+; X64-NEXT:    orl $12, %eax
+; X64-NEXT:    retq
+  %x = or i32 %a0, 3
+  %y = freeze i32 %x
+  %z = or i32 %y, 12
+  ret i32 %z
+}
+
+define <2 x i64> @freeze_or_vec(<2 x i64> %a0) nounwind {
+; X86-LABEL: freeze_or_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    orps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    orps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_or_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = or <2 x i64> %a0, <i64 1, i64 3>
+  %y = freeze <2 x i64> %x
+  %z = or <2 x i64> %y, <i64 14, i64 12>
+  ret <2 x i64> %z
+}
+
+define i32 @freeze_xor(i32 %a0) nounwind {
+; X86-LABEL: freeze_xor:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl $3, %eax
+; X86-NEXT:    xorl $12, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_xor:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $3, %eax
+; X64-NEXT:    xorl $12, %eax
+; X64-NEXT:    retq
+  %x = xor i32 %a0, 3
+  %y = freeze i32 %x
+  %z = xor i32 %y, 12
+  ret i32 %z
+}
+
+define <8 x i16> @freeze_xor_vec(<8 x i16> %a0) nounwind {
+; X86-LABEL: freeze_xor_vec:
+; X86:       # %bb.0:
+; X86-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: freeze_xor_vec:
+; X64:       # %bb.0:
+; X64-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %x = xor <8 x i16> %a0, <i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0>
+  %y = freeze <8 x i16> %x
+  %z = xor <8 x i16> %y, <i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1>
+  ret <8 x i16> %z
+}
+
 define i32 @freeze_add(i32 %a0) nounwind {
 ; X86-LABEL: freeze_add:
 ; X86:       # %bb.0:
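
Each new test follows the same shape: a bitwise op with one constant, a freeze of the result, then a second bitwise op of the same kind with another constant; the autogenerated CHECK lines show that both operations are still emitted back to back. Below is a minimal IR sketch of the fold this coverage would track, assuming freeze can be hoisted through a constant 'and' (an 'and' with a constant introduces no new poison once its operand is frozen) so that the two masks combine, 15 & 7 = 7. The folded function is hypothetical and is not produced by this commit.

  ; pattern from the new tests: and, freeze, and
  define i32 @freeze_and_sketch(i32 %a0) {
    %x = and i32 %a0, 15
    %y = freeze i32 %x
    %z = and i32 %y, 7
    ret i32 %z
  }

  ; hypothetical folded form: the freeze is hoisted past the constant
  ; 'and', and the two masks combine into a single 'and' with 7,
  ; leaving one andl in the generated code instead of two.
  define i32 @freeze_and_folded(i32 %a0) {
    %f = freeze i32 %a0
    %z = and i32 %f, 7
    ret i32 %z
  }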