[llvm] r357673 - [X86] Use INSERT_SUBREG rather than SUBREG_TO_REG when creating LEA64_32 during isel.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 3 22:00:18 PDT 2019
Author: ctopper
Date: Wed Apr 3 22:00:18 2019
New Revision: 357673
URL: http://llvm.org/viewvc/llvm-project?rev=357673&view=rev
Log:
[X86] Use INSERT_SUBREG rather than SUBREG_TO_REG when creating LEA64_32 during isel.
SUBREG_TO_REG is supposed to be used to assert that we know the upper bits are
zero. But that isn't the case here. We've done no analysis of the inputs.
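For readers less familiar with these pseudo instructions, the difference can be sketched
at the MachineIR level roughly as below (register names are made up for illustration and
are not taken from the commit). SUBREG_TO_REG carries an assertion that the bits of the
wider register outside the inserted subregister equal its immediate operand (zero here),
whereas IMPLICIT_DEF followed by INSERT_SUBREG makes no claim about those bits.

Old expansion, which asserts the upper 32 bits of the 64-bit result are zero:

    %wide:gr64 = SUBREG_TO_REG 0, %narrow:gr32, %subreg.sub_32bit

New expansion, which leaves the upper 32 bits of the 64-bit result undefined:

    %tmp:gr64 = IMPLICIT_DEF
    %wide:gr64 = INSERT_SUBREG %tmp:gr64, %narrow:gr32, %subreg.sub_32bit

Since selectLEA64_32Addr has not proven anything about the incoming 32-bit base or index
values, the second form is the honest one; later passes must not assume the high bits are
zero.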
Modified:
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/test/CodeGen/X86/avg.ll
llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=357673&r1=357672&r2=357673&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Apr 3 22:00:18 2019
@@ -2131,12 +2131,10 @@ bool X86DAGToDAGISel::selectLEA64_32Addr
Base = CurDAG->getRegister(0, MVT::i64);
else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) {
// Base could already be %rip, particularly in the x32 ABI.
- Base = SDValue(CurDAG->getMachineNode(
- TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
- CurDAG->getTargetConstant(0, DL, MVT::i64),
- Base,
- CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
- 0);
+ SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
+ MVT::i64), 0);
+ Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
+ Base);
}
RN = dyn_cast<RegisterSDNode>(Index);
@@ -2145,13 +2143,10 @@ bool X86DAGToDAGISel::selectLEA64_32Addr
else {
assert(Index.getValueType() == MVT::i32 &&
"Expect to be extending 32-bit registers for use in LEA");
- Index = SDValue(CurDAG->getMachineNode(
- TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
- CurDAG->getTargetConstant(0, DL, MVT::i64),
- Index,
- CurDAG->getTargetConstant(X86::sub_32bit, DL,
- MVT::i32)),
- 0);
+ SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
+ MVT::i64), 0);
+ Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
+ Index);
}
return true;
Modified: llvm/trunk/test/CodeGen/X86/avg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avg.ll?rev=357673&r1=357672&r2=357673&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avg.ll Wed Apr 3 22:00:18 2019
@@ -2048,106 +2048,108 @@ define void @not_avg_v16i8_wide_constant
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm6, %r15
-; AVX1-NEXT: vmovq %xmm6, %r12
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpextrq $1, %xmm2, %r11
-; AVX1-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm2, %r13
-; AVX1-NEXT: vmovq %xmm2, %r14
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpextrq $1, %xmm7, %r15
+; AVX1-NEXT: vmovq %xmm7, %r14
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpextrq $1, %xmm4, %r11
+; AVX1-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpextrq $1, %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm7 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm8 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm6, %rbx
-; AVX1-NEXT: vmovq %xmm6, %rdx
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpextrq $1, %xmm1, %r9
-; AVX1-NEXT: vmovq %xmm1, %r10
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX1-NEXT: vmovd %xmm7, %esi
-; AVX1-NEXT: vpextrd $1, %xmm7, %edi
-; AVX1-NEXT: vpextrd $2, %xmm7, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm7, %ebp
-; AVX1-NEXT: vpextrd $3, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rbp,%rax), %eax
-; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vpextrd $2, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rcx,%rax), %eax
-; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vpextrd $1, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rdi,%rax), %eax
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vmovd %xmm6, %ecx
+; AVX1-NEXT: vpextrd $1, %xmm6, %edx
+; AVX1-NEXT: vpextrd $2, %xmm6, %r13d
+; AVX1-NEXT: vpextrd $3, %xmm6, %r12d
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX1-NEXT: vmovd %xmm1, %ebx
+; AVX1-NEXT: vpextrd $1, %xmm1, %ebp
+; AVX1-NEXT: vpextrd $2, %xmm1, %esi
+; AVX1-NEXT: vpextrd $3, %xmm1, %edi
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; AVX1-NEXT: vmovd %xmm7, %r8d
+; AVX1-NEXT: leal -1(%r12,%rdi), %eax
; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vmovd %xmm6, %eax
-; AVX1-NEXT: leal -1(%rsi,%rax), %r8d
-; AVX1-NEXT: vpextrq $1, %xmm5, %rax
-; AVX1-NEXT: leal -1(%r15,%rbx), %r15d
-; AVX1-NEXT: vmovq %xmm5, %rsi
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX1-NEXT: leal -1(%r12,%rdx), %edx
-; AVX1-NEXT: vmovd %xmm4, %r12d
-; AVX1-NEXT: leal -1(%r11,%r9), %r11d
+; AVX1-NEXT: vpextrd $2, %xmm7, %eax
+; AVX1-NEXT: leal -1(%r13,%rsi), %esi
+; AVX1-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; AVX1-NEXT: vpextrd $2, %xmm4, %edi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: leal -1(%rcx,%r10), %r10d
-; AVX1-NEXT: vpextrd $2, %xmm2, %ebx
-; AVX1-NEXT: leal -1(%r13,%rax), %r9d
-; AVX1-NEXT: vpextrd $3, %xmm2, %eax
-; AVX1-NEXT: leal -1(%r14,%rsi), %esi
+; AVX1-NEXT: leal -1(%rdx,%rbp), %edx
+; AVX1-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX1-NEXT: vpextrd $3, %xmm4, %edx
+; AVX1-NEXT: leal -1(%rcx,%rbx), %r10d
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: leal -1(%rax,%rcx), %eax
+; AVX1-NEXT: leal -1(%rdx,%rcx), %r9d
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: leal -1(%rbx,%rcx), %ebx
-; AVX1-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX1-NEXT: leal -1(%rdi,%rcx), %ecx
-; AVX1-NEXT: vmovd %xmm3, %edi
-; AVX1-NEXT: leal -1(%r12,%rdi), %edi
-; AVX1-NEXT: vpextrq $1, %xmm8, %r12
+; AVX1-NEXT: leal -1(%rdi,%rcx), %edi
+; AVX1-NEXT: vpextrd $2, %xmm5, %ecx
+; AVX1-NEXT: leal -1(%rax,%rcx), %eax
+; AVX1-NEXT: vmovd %xmm5, %ecx
+; AVX1-NEXT: leal -1(%r8,%rcx), %r8d
+; AVX1-NEXT: vpextrq $1, %xmm6, %rdx
+; AVX1-NEXT: leal -1(%r15,%rdx), %r15d
+; AVX1-NEXT: vmovq %xmm6, %rdx
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: leal -1(%r14,%rdx), %r14d
+; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX1-NEXT: leal -1(%r11,%rdx), %edx
+; AVX1-NEXT: vmovq %xmm1, %rcx
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT: leal -1(%rsi,%rcx), %ecx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: leal -1(%rbp,%rsi), %esi
+; AVX1-NEXT: vmovq %xmm1, %rbx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: leal -1(%rbp,%rbx), %ebx
+; AVX1-NEXT: vpextrq $1, %xmm8, %r11
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX1-NEXT: vpextrq $1, %xmm0, %r13
-; AVX1-NEXT: leal -1(%r12,%r13), %r12d
-; AVX1-NEXT: vmovq %xmm8, %r13
-; AVX1-NEXT: vmovq %xmm0, %r14
-; AVX1-NEXT: leal -1(%r13,%r14), %ebp
+; AVX1-NEXT: vpextrq $1, %xmm0, %r12
+; AVX1-NEXT: leal -1(%r11,%r12), %r11d
+; AVX1-NEXT: vmovq %xmm8, %r12
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: leal -1(%r12,%r13), %ebp
; AVX1-NEXT: shrl %ebp
; AVX1-NEXT: vmovd %ebp, %xmm0
-; AVX1-NEXT: shrl %r12d
-; AVX1-NEXT: vpinsrb $1, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %esi
-; AVX1-NEXT: vpinsrb $2, %esi, %xmm0, %xmm0
-; AVX1-NEXT: shrl %r9d
-; AVX1-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %r10d
-; AVX1-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0
; AVX1-NEXT: shrl %r11d
-; AVX1-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $1, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %ebx
+; AVX1-NEXT: vpinsrb $2, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: shrl %esi
+; AVX1-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $5, %edx, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r14d
+; AVX1-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0
; AVX1-NEXT: shrl %r15d
; AVX1-NEXT: vpinsrb $7, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %edi
-; AVX1-NEXT: vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl %ebx
-; AVX1-NEXT: vpinsrb $10, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
; AVX1-NEXT: shrl %r8d
-; AVX1-NEXT: vpinsrb $12, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $8, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrl %edi
+; AVX1-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r9d
+; AVX1-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r10d
+; AVX1-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0
; AVX1-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
; AVX1-NEXT: shrl %eax
; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll?rev=357673&r1=357672&r2=357673&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll Wed Apr 3 22:00:18 2019
@@ -39,19 +39,19 @@ define void @base() #0 {
; X32ABI: # %bb.0: # %entry
; X32ABI-NEXT: pushq %rbp
; X32ABI-NEXT: movl %esp, %ebp
-; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: pushq %rbx
; X32ABI-NEXT: andl $-32, %esp
; X32ABI-NEXT: subl $32, %esp
; X32ABI-NEXT: movl %esp, %ebx
; X32ABI-NEXT: callq helper
; X32ABI-NEXT: # kill: def $eax killed $eax def $rax
-; X32ABI-NEXT: movl %esp, %ecx
; X32ABI-NEXT: leal 31(,%rax,4), %eax
; X32ABI-NEXT: andl $-32, %eax
+; X32ABI-NEXT: movl %esp, %ecx
; X32ABI-NEXT: movl %ecx, %edx
; X32ABI-NEXT: subl %eax, %edx
-; X32ABI-NEXT: movl %edx, %esp
; X32ABI-NEXT: negl %eax
+; X32ABI-NEXT: movl %edx, %esp
; X32ABI-NEXT: movl $0, (%ecx,%eax)
; X32ABI-NEXT: leal -8(%ebp), %esp
; X32ABI-NEXT: popq %rbx