[llvm] r357845 - [X86] Add test cases to show missed opportunities to use a sign-extended 8- or 32-bit immediate AND when reversing SHL+AND to form an LEA.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 6 11:00:45 PDT 2019


Author: ctopper
Date: Sat Apr  6 11:00:45 2019
New Revision: 357845

URL: http://llvm.org/viewvc/llvm-project?rev=357845&view=rev
Log:
[X86] Add test cases to show missed opportunities to use a sign-extended 8- or 32-bit immediate AND when reversing SHL+AND to form an LEA.

When we shift the AND mask over, we should shift in sign bits instead of zero bits. The scale in the LEA will shift these bits back out, so it doesn't matter whether we mask them off or not, and using sign bits potentially allows a sign-extended immediate to be used.
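
To make the arithmetic concrete, here is a minimal standalone sketch (not compiler code) using the -1020 mask from t1 in the new test file. It checks that the zero-filled and sign-filled masks compute the same offset once the LEA's scale-4 shifts the top two bits back out; the right shift of a negative int64_t is assumed to be arithmetic, as it is on the compilers LLVM supports.

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    int64_t mask = -1020;                        /* 0xFFFFFFFFFFFFFC04 */
    /* Shift in zero bits: what we emit today; needs a movabsq. */
    uint64_t zero_fill = (uint64_t)mask >> 2;    /* 0x3FFFFFFFFFFFFF01 */
    /* Shift in sign bits: -255, encodable as a sign-extended immediate. */
    uint64_t sign_fill = (uint64_t)(mask >> 2);  /* 0xFFFFFFFFFFFFFF01 */
    uint64_t samples[] = {0, 1, 255, 0x123456789ABCDEF0ULL, UINT64_MAX};
    for (unsigned k = 0; k < sizeof(samples) / sizeof(samples[0]); ++k) {
      uint64_t i = samples[k];
      /* The scale-4 (<< 2) discards bits 62 and 63, the only bits on
         which the two masks differ, so the computed offset matches. */
      assert(((i & zero_fill) << 2) == ((i & sign_fill) << 2));
    }
    return 0;
  }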

Also add some test cases for patterns that are already handled optimally.

Added:
    llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll
Modified:
    llvm/trunk/test/CodeGen/X86/fold-and-shift.ll

Added: llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll?rev=357845&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll Sat Apr  6 11:00:45 2019
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define i8 @t1(i8* %X, i64 %i) {
+; CHECK-LABEL: t1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movabsq $4611686018427387649, %rax # imm = 0x3FFFFFFFFFFFFF01
+; CHECK-NEXT:    andq %rsi, %rax
+; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    retq
+
+entry:
+  %tmp2 = shl i64 %i, 2
+  %tmp4 = and i64 %tmp2, -1020
+  %tmp7 = getelementptr i8, i8* %X, i64 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
+
+define i8 @t2(i8* %X, i64 %i) {
+; CHECK-LABEL: t2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movabsq $4611686018427387890, %rax # imm = 0x3FFFFFFFFFFFFFF2
+; CHECK-NEXT:    andq %rsi, %rax
+; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    retq
+
+entry:
+  %tmp2 = shl i64 %i, 2
+  %tmp4 = and i64 %tmp2, -56
+  %tmp7 = getelementptr i8, i8* %X, i64 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
+
+define i8 @t3(i8* %X, i64 %i) {
+; CHECK-LABEL: t3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    retq
+
+entry:
+  %tmp2 = shl i64 %i, 2
+  %tmp4 = and i64 %tmp2, 17179869180
+  %tmp7 = getelementptr i8, i8* %X, i64 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
+
+define i8 @t4(i8* %X, i64 %i) {
+; CHECK-LABEL: t4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andl $-2, %esi
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
+; CHECK-NEXT:    retq
+
+entry:
+  %tmp2 = shl i64 %i, 2
+  %tmp4 = and i64 %tmp2, 17179869176
+  %tmp7 = getelementptr i8, i8* %X, i64 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
+
+define i8 @t5(i8* %X, i64 %i) {
+; CHECK-LABEL: t5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andl $-250002, %esi # imm = 0xFFFC2F6E
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
+; CHECK-NEXT:    retq
+
+entry:
+  %tmp2 = shl i64 %i, 2
+  %tmp4 = and i64 %tmp2, 17178869176
+  %tmp7 = getelementptr i8, i8* %X, i64 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
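
For reference, the already-optimal cases above (t3 through t5) fall out of the shifted constants. A quick sanity check of those values, as a sketch that just verifies the arithmetic and is not part of the test:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    /* t3: 17179869180 >> 2 == 0xFFFFFFFF; a plain movl does the masking. */
    printf("%#" PRIx64 "\n", (uint64_t)17179869180ULL >> 2);
    /* t4: 17179869176 >> 2 == 0xFFFFFFFE, i.e. -2 as a 32-bit immediate. */
    printf("%#" PRIx64 "\n", (uint64_t)17179869176ULL >> 2);
    /* t5: 17178869176 >> 2 == 0xFFFC2F6E, i.e. -250002 as a 32-bit immediate. */
    printf("%#" PRIx64 "\n", (uint64_t)17178869176ULL >> 2);
    return 0;
  }

A 32-bit andl also implicitly zeroes the upper half of the destination register, which is why no 64-bit AND is needed in these three cases.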

Modified: llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift.ll?rev=357845&r1=357844&r2=357845&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift.ll Sat Apr  6 11:00:45 2019
@@ -89,3 +89,37 @@ entry:
   %sum.2 = add i32 %sum.1, %index
   ret i32 %sum.2
 }
+
+define i8 @t5(i8* %X, i32 %i) {
+; CHECK-LABEL: t5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    andl $1073741810, %ecx # imm = 0x3FFFFFF2
+; CHECK-NEXT:    movb (%eax,%ecx,4), %al
+; CHECK-NEXT:    retl
+
+entry:
+  %tmp2 = shl i32 %i, 2
+  %tmp4 = and i32 %tmp2, -56
+  %tmp7 = getelementptr i8, i8* %X, i32 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
+
+define i8 @t6(i8* %X, i32 %i) {
+; CHECK-LABEL: t6:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    andl $1073741569, %ecx # imm = 0x3FFFFF01
+; CHECK-NEXT:    movb (%eax,%ecx,4), %al
+; CHECK-NEXT:    retl
+
+entry:
+  %tmp2 = shl i32 %i, 2
+  %tmp4 = and i32 %tmp2, -1020
+  %tmp7 = getelementptr i8, i8* %X, i32 %tmp4
+  %tmp9 = load i8, i8* %tmp7
+  ret i8 %tmp9
+}
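
The same arithmetic for the two 32-bit cases added here, as a sketch (again assuming arithmetic right shift for negative signed values):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* t5: mask -56 is 0xFFFFFFC8. */
    assert(((uint32_t)-56 >> 2) == 0x3FFFFFF2u);          /* 1073741810, emitted today */
    assert((uint32_t)((int32_t)-56 >> 2) == 0xFFFFFFF2u); /* -14: fits a sign-extended
                                                             8-bit immediate */
    /* t6: mask -1020 is 0xFFFFFC04. */
    assert(((uint32_t)-1020 >> 2) == 0x3FFFFF01u);          /* 1073741569, emitted today */
    assert((uint32_t)((int32_t)-1020 >> 2) == 0xFFFFFF01u); /* -255 */
    return 0;
  }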



