[llvm] 956ae7c - [X86] combine-addo.ll - add common CHECK prefix
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 29 02:32:36 PDT 2023
Author: Simon Pilgrim
Date: 2023-09-29T10:31:38+01:00
New Revision: 956ae7cf8da52c4caa834d6cf1ac8d8ee04f2353
URL: https://github.com/llvm/llvm-project/commit/956ae7cf8da52c4caa834d6cf1ac8d8ee04f2353
DIFF: https://github.com/llvm/llvm-project/commit/956ae7cf8da52c4caa834d6cf1ac8d8ee04f2353.diff
LOG: [X86] combine-addo.ll - add common CHECK prefix
Added:
Modified:
llvm/test/CodeGen/X86/combine-addo.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-addo.ll b/llvm/test/CodeGen/X86/combine-addo.ll
index e93254e052a7191..af51c04765224ef 100644
--- a/llvm/test/CodeGen/X86/combine-addo.ll
+++ b/llvm/test/CodeGen/X86/combine-addo.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
@@ -10,15 +10,10 @@ declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32>, <4 x i32
; fold (sadd x, 0) -> x
define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_sadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_sadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_sadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
%2 = extractvalue {i32, i1} %1, 0
%3 = extractvalue {i32, i1} %1, 1
@@ -27,13 +22,9 @@ define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
}
define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE-LABEL: combine_vec_sadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_vec_sadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_vec_sadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
%3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
@@ -43,15 +34,10 @@ define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; fold (uadd x, 0) -> x
define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_uadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
%2 = extractvalue {i32, i1} %1, 0
%3 = extractvalue {i32, i1} %1, 1
@@ -60,13 +46,9 @@ define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
}
define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
-; SSE-LABEL: combine_vec_uadd_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_vec_uadd_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_vec_uadd_zero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
%3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
@@ -76,19 +58,12 @@ define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; fold (uadd (xor a, -1), 1) -> (usub 0, a) and flip carry
define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_uadd_not:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: negl %eax
-; SSE-NEXT: cmovael %esi, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_not:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: negl %eax
-; AVX-NEXT: cmovael %esi, %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: negl %eax
+; CHECK-NEXT: cmovael %esi, %eax
+; CHECK-NEXT: retq
%1 = xor i32 %a0, -1
%2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 1)
%3 = extractvalue {i32, i1} %2, 0
@@ -128,23 +103,14 @@ define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
; if uaddo never overflows, replace with add
define i32 @combine_uadd_no_overflow(i32 %a0, i32 %a1, i32 %a2) {
-; SSE-LABEL: combine_uadd_no_overflow:
-; SSE: # %bb.0:
-; SSE-NEXT: # kill: def $edx killed $edx def $rdx
-; SSE-NEXT: # kill: def $esi killed $esi def $rsi
-; SSE-NEXT: shrl $16, %esi
-; SSE-NEXT: shrl $16, %edx
-; SSE-NEXT: leal (%rdx,%rsi), %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: combine_uadd_no_overflow:
-; AVX: # %bb.0:
-; AVX-NEXT: # kill: def $edx killed $edx def $rdx
-; AVX-NEXT: # kill: def $esi killed $esi def $rsi
-; AVX-NEXT: shrl $16, %esi
-; AVX-NEXT: shrl $16, %edx
-; AVX-NEXT: leal (%rdx,%rsi), %eax
-; AVX-NEXT: retq
+; CHECK-LABEL: combine_uadd_no_overflow:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: shrl $16, %esi
+; CHECK-NEXT: shrl $16, %edx
+; CHECK-NEXT: leal (%rdx,%rsi), %eax
+; CHECK-NEXT: retq
%1 = lshr i32 %a1, 16
%2 = lshr i32 %a2, 16
%3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 %2)
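
For context (not part of the commit): FileCheck's --check-prefixes option takes a comma-separated list, and a directive is active for a RUN line whenever its prefix appears in that list. Since the SSE and AVX codegen for these combine-addo.ll functions is identical, the duplicated SSE-*/AVX-* blocks can be folded into a single block under the shared CHECK prefix. A minimal sketch with a hypothetical function (return_first_arg is invented for illustration, not taken from the test):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX

; Hypothetical test function (sketch only, not part of combine-addo.ll).
; The CHECK block is verified for both RUN lines; output that differs
; between the two runs would still use the SSE or AVX prefix.
define i32 @return_first_arg(i32 %a0, i32 %a1) {
; CHECK-LABEL: return_first_arg:
; CHECK:       movl %edi, %eax
; CHECK-NEXT:  retq
  ret i32 %a0
}

In practice these blocks are regenerated by utils/update_llc_test_checks.py, which emits per-prefix output that is identical across RUN lines once under the common prefix named in --check-prefixes.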