[llvm] Add some -early-live-intervals RUN lines (PR #66058)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 12 02:16:29 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Changes:
This adds test coverage for an upcoming change to
TwoAddressInstructionPass::processTiedPairs.
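
For context, the pattern in each affected test is to keep the existing llc RUN line and add a second one with -early-live-intervals, splitting the shared FileCheck prefix into an -LV variant (the default LiveVariables path) and an -LIS variant (the LiveIntervals path). Below is a minimal sketch of that layout, reusing the combine-or.ll RUN lines from the diff further down; the function @f and its labels are purely illustrative and not taken from the actual tests:

; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefixes=CHECK,CHECK-LV
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -early-live-intervals | FileCheck %s -check-prefixes=CHECK,CHECK-LIS

; Functions whose codegen is identical in both modes keep plain CHECK lines.
; Where the output differs, each mode gets its own prefix-specific check body,
; e.g. (schematic, hypothetical function):
; CHECK-LV-LABEL: f:
; CHECK-LIS-LABEL: f:
define i32 @f(i32 %x) {
  %r = or i32 %x, %x
  ret i32 %r
}

The check bodies themselves are (re)generated with llvm/utils/update_llc_test_checks.py, per the NOTE headers already present in the X86 tests below.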
--
Full diff: https://github.com/llvm/llvm-project/pull/66058.diff
3 Files Affected:
- (modified) llvm/test/CodeGen/SystemZ/rot-02.ll (+16-7)
- (modified) llvm/test/CodeGen/X86/combine-or.ll (+21-10)
- (modified) llvm/test/CodeGen/X86/combine-rotates.ll (+52-25)
diff --git a/llvm/test/CodeGen/SystemZ/rot-02.ll b/llvm/test/CodeGen/SystemZ/rot-02.ll
index 491951f637bbf7b..84fac6af5fcaa5e 100644
--- a/llvm/test/CodeGen/SystemZ/rot-02.ll
+++ b/llvm/test/CodeGen/SystemZ/rot-02.ll
@@ -2,7 +2,8 @@
; Test removal of AND operations that don't affect last 6 bits of rotate amount
; operand.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefixes=CHECK,CHECK-LV
+; RUN: llc < %s -mtriple=s390x-linux-gnu -early-live-intervals | FileCheck %s -check-prefixes=CHECK,CHECK-LIS
; Test that AND is not removed when some lower 5 bits are not set.
define i32 @f1(i32 %val, i32 %amt) {
@@ -75,12 +76,20 @@ define i64 @f4(i64 %val, i64 %amt) {
; Test that AND is not entirely removed if the result is reused.
define i32 @f5(i32 %val, i32 %amt) {
-; CHECK-LABEL: f5:
-; CHECK: # %bb.0:
-; CHECK-NEXT: rll %r2, %r2, 0(%r3)
-; CHECK-NEXT: nilf %r3, 63
-; CHECK-NEXT: ar %r2, %r3
-; CHECK-NEXT: br %r14
+; CHECK-LV-LABEL: f5:
+; CHECK-LV: # %bb.0:
+; CHECK-LV-NEXT: rll %r2, %r2, 0(%r3)
+; CHECK-LV-NEXT: nilf %r3, 63
+; CHECK-LV-NEXT: ar %r2, %r3
+; CHECK-LV-NEXT: br %r14
+;
+; CHECK-LIS-LABEL: f5:
+; CHECK-LIS: # %bb.0:
+; CHECK-LIS-NEXT: rll %r0, %r2, 0(%r3)
+; CHECK-LIS-NEXT: nilf %r3, 63
+; CHECK-LIS-NEXT: ar %r3, %r0
+; CHECK-LIS-NEXT: lr %r2, %r3
+; CHECK-LIS-NEXT: br %r14
%and = and i32 %amt, 63
%inv = sub i32 32, %and
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index 8d490c7727e31ed..bfb9885c10c4e5d 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s -check-prefixes=CHECK,CHECK-LV
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -early-live-intervals | FileCheck %s -check-prefixes=CHECK,CHECK-LIS
define i32 @or_self(i32 %x) {
; CHECK-LABEL: or_self:
@@ -235,15 +236,25 @@ define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
-; CHECK-LABEL: test18:
-; CHECK: # %bb.0:
-; CHECK-NEXT: pxor %xmm2, %xmm2
-; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; CHECK-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
-; CHECK-NEXT: por %xmm0, %xmm2
-; CHECK-NEXT: movdqa %xmm2, %xmm0
-; CHECK-NEXT: retq
+; CHECK-LV-LABEL: test18:
+; CHECK-LV: # %bb.0:
+; CHECK-LV-NEXT: pxor %xmm2, %xmm2
+; CHECK-LV-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; CHECK-LV-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
+; CHECK-LV-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; CHECK-LV-NEXT: por %xmm0, %xmm2
+; CHECK-LV-NEXT: movdqa %xmm2, %xmm0
+; CHECK-LV-NEXT: retq
+;
+; CHECK-LIS-LABEL: test18:
+; CHECK-LIS: # %bb.0:
+; CHECK-LIS-NEXT: pxor %xmm2, %xmm2
+; CHECK-LIS-NEXT: pxor %xmm3, %xmm3
+; CHECK-LIS-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2,3,4,5,6,7]
+; CHECK-LIS-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,0,1,1]
+; CHECK-LIS-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; CHECK-LIS-NEXT: por %xmm2, %xmm0
+; CHECK-LIS-NEXT: retq
%shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
%shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
%or = or <4 x i32> %shuf1, %shuf2
diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll
index 41d1b231b1ef7a0..dc8c0e13edcaa27 100644
--- a/llvm/test/CodeGen/X86/combine-rotates.ll
+++ b/llvm/test/CodeGen/X86/combine-rotates.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2,SSE2-LV
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -early-live-intervals | FileCheck %s --check-prefixes=CHECK,SSE2,SSE2-LIS
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
@@ -114,30 +115,56 @@ define i32 @combine_rot_select_zero(i32, i32) {
}
define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
-; SSE2-LABEL: combine_vec_rot_select_zero:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pslld $23, %xmm1
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-NEXT: por %xmm4, %xmm3
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
+; SSE2-LV-LABEL: combine_vec_rot_select_zero:
+; SSE2-LV: # %bb.0:
+; SSE2-LV-NEXT: pxor %xmm2, %xmm2
+; SSE2-LV-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-LV-NEXT: pslld $23, %xmm1
+; SSE2-LV-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-LV-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-LV-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE2-LV-NEXT: movdqa %xmm0, %xmm3
+; SSE2-LV-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-LV-NEXT: pmuludq %xmm5, %xmm1
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
+; SSE2-LV-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-LV-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-LV-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-LV-NEXT: por %xmm4, %xmm3
+; SSE2-LV-NEXT: pand %xmm2, %xmm0
+; SSE2-LV-NEXT: pandn %xmm3, %xmm2
+; SSE2-LV-NEXT: por %xmm2, %xmm0
+; SSE2-LV-NEXT: retq
+;
+; SSE2-LIS-LABEL: combine_vec_rot_select_zero:
+; SSE2-LIS: # %bb.0:
+; SSE2-LIS-NEXT: pxor %xmm2, %xmm2
+; SSE2-LIS-NEXT: pcmpeqd %xmm1, %xmm2
+; SSE2-LIS-NEXT: pslld $23, %xmm1
+; SSE2-LIS-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-LIS-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-LIS-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE2-LIS-NEXT: movdqa %xmm0, %xmm3
+; SSE2-LIS-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-LIS-NEXT: pmuludq %xmm5, %xmm1
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
+; SSE2-LIS-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-LIS-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-LIS-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-LIS-NEXT: por %xmm4, %xmm3
+; SSE2-LIS-NEXT: pand %xmm2, %xmm0
+; SSE2-LIS-NEXT: pandn %xmm3, %xmm2
+; SSE2-LIS-NEXT: por %xmm0, %xmm2
+; SSE2-LIS-NEXT: movdqa %xmm2, %xmm0
+; SSE2-LIS-NEXT: retq
;
; XOP-LABEL: combine_vec_rot_select_zero:
; XOP: # %bb.0:
https://github.com/llvm/llvm-project/pull/66058
More information about the llvm-commits mailing list