[llvm] 2264f7e - [X86] Add 64bits test cases for D131358

Amaury Séchet via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 8 16:14:43 PDT 2022


Author: Amaury Séchet
Date: 2022-08-08T23:12:49Z
New Revision: 2264f7ef27a5dcc89e41fef4a7c734471011f6fd

URL: https://github.com/llvm/llvm-project/commit/2264f7ef27a5dcc89e41fef4a7c734471011f6fd
DIFF: https://github.com/llvm/llvm-project/commit/2264f7ef27a5dcc89e41fef4a7c734471011f6fd.diff

LOG: [X86] Add 64bits test cases for D131358

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/or-lea.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/or-lea.ll b/llvm/test/CodeGen/X86/or-lea.ll
index 93cb5faf09c94..b0bbd9a24e6d4 100644
--- a/llvm/test/CodeGen/X86/or-lea.ll
+++ b/llvm/test/CodeGen/X86/or-lea.ll
@@ -523,6 +523,34 @@ define i32 @or_sext1(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext1_64(i64 %x) {
+; X86-LABEL: or_sext1_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $1, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext1_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $1, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 1
+  ret i64 %or
+}
+
 define i32 @or_sext2(i32 %x) {
 ; X86-LABEL: or_sext2:
 ; X86:       # %bb.0:
@@ -547,6 +575,34 @@ define i32 @or_sext2(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext2_64(i64 %x) {
+; X86-LABEL: or_sext2_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $2, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext2_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $2, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 2
+  ret i64 %or
+}
+
 define i32 @or_sext3(i32 %x) {
 ; X86-LABEL: or_sext3:
 ; X86:       # %bb.0:
@@ -571,6 +627,34 @@ define i32 @or_sext3(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext3_64(i64 %x) {
+; X86-LABEL: or_sext3_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $3, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext3_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $3, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 3
+  ret i64 %or
+}
+
 define i32 @or_sext4(i32 %x) {
 ; X86-LABEL: or_sext4:
 ; X86:       # %bb.0:
@@ -595,6 +679,34 @@ define i32 @or_sext4(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext4_64(i64 %x) {
+; X86-LABEL: or_sext4_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $4, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext4_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $4, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 4
+  ret i64 %or
+}
+
 define i32 @or_sext7(i32 %x) {
 ; X86-LABEL: or_sext7:
 ; X86:       # %bb.0:
@@ -619,6 +731,34 @@ define i32 @or_sext7(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext7_64(i64 %x) {
+; X86-LABEL: or_sext7_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $7, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext7_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $7, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 7
+  ret i64 %or
+}
+
 define i32 @or_sext8(i32 %x) {
 ; X86-LABEL: or_sext8:
 ; X86:       # %bb.0:
@@ -643,3 +783,30 @@ define i32 @or_sext8(i32 %x) {
   ret i32 %or
 }
 
+define i64 @or_sext8_64(i64 %x) {
+; X86-LABEL: or_sext8_64:
+; X86:       # %bb.0:
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movl $42, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setl %al
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    negl %edx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    orl $8, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_sext8_64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq $43, %rdi
+; X64-NEXT:    setge %al
+; X64-NEXT:    negq %rax
+; X64-NEXT:    orq $8, %rax
+; X64-NEXT:    retq
+  %cmp = icmp sgt i64 %x, 42
+  %sext = sext i1 %cmp to i64
+  %or = or i64 %sext, 8
+  ret i64 %or
+}


        


More information about the llvm-commits mailing list