[llvm] 56fdc69 - [X86] Regenerate zext-load tests and add 32-bit test coverage.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 6 01:54:25 PST 2020


Author: Simon Pilgrim
Date: 2020-11-06T09:54:08Z
New Revision: 56fdc6947274fe543828b2152d2c9b3cc02c6a3a

URL: https://github.com/llvm/llvm-project/commit/56fdc6947274fe543828b2152d2c9b3cc02c6a3a
DIFF: https://github.com/llvm/llvm-project/commit/56fdc6947274fe543828b2152d2c9b3cc02c6a3a.diff

LOG: [X86] Regenerate zext-load tests and add 32-bit test coverage.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/zext-logicop-shift-load.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/zext-logicop-shift-load.ll b/llvm/test/CodeGen/X86/zext-logicop-shift-load.ll
index 3618c76bee73..a02c5209a3b2 100644
--- a/llvm/test/CodeGen/X86/zext-logicop-shift-load.ll
+++ b/llvm/test/CodeGen/X86/zext-logicop-shift-load.ll
@@ -1,14 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
-
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
 
 define i64 @test1(i8* %data) {
-; CHECK-LABEL: test1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    shll $2, %eax
-; CHECK-NEXT:    andl $60, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: test1:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    shll $2, %eax
+; X86-NEXT:    andl $60, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test1:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    shll $2, %eax
+; X64-NEXT:    andl $60, %eax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -18,12 +27,20 @@ entry:
 }
 
 define i8* @test2(i8* %data) {
-; CHECK-LABEL: test2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    andl $15, %eax
-; CHECK-NEXT:    leaq (%rdi,%rax,4), %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: test2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %ecx
+; X86-NEXT:    andl $15, %ecx
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test2:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    andl $15, %eax
+; X64-NEXT:    leaq (%rdi,%rax,4), %rax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -35,13 +52,23 @@ entry:
 
 ; If the shift op is SHL, the logic op can only be AND.
 define i64 @test3(i8* %data) {
-; CHECK-LABEL: test3:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movb (%rdi), %al
-; CHECK-NEXT:    shlb $2, %al
-; CHECK-NEXT:    xorb $60, %al
-; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: test3:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb (%eax), %al
+; X86-NEXT:    shlb $2, %al
+; X86-NEXT:    xorb $60, %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test3:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movb (%rdi), %al
+; X64-NEXT:    shlb $2, %al
+; X64-NEXT:    xorb $60, %al
+; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = shl i8 %bf.load, 2
@@ -51,12 +78,21 @@ entry:
 }
 
 define i64 @test4(i8* %data) {
-; CHECK-LABEL: test4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    andl $60, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: test4:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $-4, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test4:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    andl $60, %eax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -66,12 +102,21 @@ entry:
 }
 
 define i64 @test5(i8* %data) {
-; CHECK-LABEL: test5:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    xorq $60, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: test5:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    xorl $60, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test5:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    xorq $60, %rax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -81,12 +126,21 @@ entry:
 }
 
 define i64 @test6(i8* %data) {
-; CHECK-LABEL: test6:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    orq $60, %rax
-; CHECK-NEXT:    retq
+; X86-LABEL: test6:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    orl $60, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test6:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    orq $60, %rax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %bf.clear = lshr i8 %bf.load, 2
@@ -97,13 +151,23 @@ entry:
 
 ; Load is folded with sext.
 define i64 @test8(i8* %data) {
-; CHECK-LABEL: test8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movsbl (%rdi), %eax
-; CHECK-NEXT:    movzwl %ax, %eax
-; CHECK-NEXT:    shrl $2, %eax
-; CHECK-NEXT:    orl $60, %eax
-; CHECK-NEXT:    retq
+; X86-LABEL: test8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl (%eax), %eax
+; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    orl $60, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movsbl (%rdi), %eax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    orl $60, %eax
+; X64-NEXT:    retq
 entry:
   %bf.load = load i8, i8* %data, align 4
   %ext = sext i8 %bf.load to i16


        


More information about the llvm-commits mailing list