[llvm] f652bfe - [X86] Fix typo in vXi64 ABDS/ABDU test cases

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 14 07:42:11 PDT 2023


Author: Simon Pilgrim
Date: 2023-03-14T14:32:19Z
New Revision: f652bfeba583987b44c2a76ec2f4b5ec168977c8

URL: https://github.com/llvm/llvm-project/commit/f652bfeba583987b44c2a76ec2f4b5ec168977c8
DIFF: https://github.com/llvm/llvm-project/commit/f652bfeba583987b44c2a76ec2f4b5ec168977c8.diff

LOG: [X86] Fix typo in vXi64 ABDS/ABDU test cases

The select operands were commuted, which prevented D144789 from folding.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/abds-vector-128.ll
    llvm/test/CodeGen/X86/abds-vector-256.ll
    llvm/test/CodeGen/X86/abds-vector-512.ll
    llvm/test/CodeGen/X86/abdu-vector-128.ll
    llvm/test/CodeGen/X86/abdu-vector-256.ll
    llvm/test/CodeGen/X86/abdu-vector-512.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/abds-vector-128.ll b/llvm/test/CodeGen/X86/abds-vector-128.ll
index b0eb8e75d790..6533e2cc20b3 100644
--- a/llvm/test/CodeGen/X86/abds-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-128.ll
@@ -968,9 +968,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    psubq %xmm1, %xmm3
 ; SSE2-NEXT:    psubq %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm2, %xmm3
-; SSE2-NEXT:    pandn %xmm1, %xmm2
-; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
 ; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
@@ -982,8 +982,8 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE42-NEXT:    psubq %xmm1, %xmm3
 ; SSE42-NEXT:    psubq %xmm0, %xmm1
 ; SSE42-NEXT:    movdqa %xmm2, %xmm0
-; SSE42-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT:    movapd %xmm1, %xmm0
+; SSE42-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE42-NEXT:    movapd %xmm3, %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: abd_cmp_v2i64:
@@ -991,7 +991,7 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm3
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: abd_cmp_v2i64:
@@ -999,20 +999,20 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm3
 ; AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: abd_cmp_v2i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltq %xmm1, %xmm0, %k1
-; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpsubq %xmm0, %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm2 {%k1}
 ; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sge <2 x i64> %a, %b
   %ab = sub <2 x i64> %a, %b
   %ba = sub <2 x i64> %b, %a
-  %sel = select <2 x i1> %cmp, <2 x i64> %ba, <2 x i64> %ab
+  %sel = select <2 x i1> %cmp, <2 x i64> %ab, <2 x i64> %ba
   ret <2 x i64> %sel
 }
 

diff  --git a/llvm/test/CodeGen/X86/abds-vector-256.ll b/llvm/test/CodeGen/X86/abds-vector-256.ll
index 880ce6434b67..b9bd875cee76 100644
--- a/llvm/test/CodeGen/X86/abds-vector-256.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-256.ll
@@ -566,9 +566,9 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
 ; AVX1-NEXT:    vpsubq %xmm4, %xmm3, %xmm7
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vblendvpd %xmm2, %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm6, %xmm0
 ; AVX1-NEXT:    vpsubq %xmm3, %xmm4, %xmm1
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm7, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -577,20 +577,20 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm3
 ; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm3, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: abd_cmp_v4i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltq %ymm1, %ymm0, %k1
-; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm2
-; AVX512-NEXT:    vpsubq %ymm0, %ymm1, %ymm2 {%k1}
+; AVX512-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
+; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm2 {%k1}
 ; AVX512-NEXT:    vmovdqa %ymm2, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sge <4 x i64> %a, %b
   %ab = sub <4 x i64> %a, %b
   %ba = sub <4 x i64> %b, %a
-  %sel = select <4 x i1> %cmp, <4 x i64> %ba, <4 x i64> %ab
+  %sel = select <4 x i1> %cmp, <4 x i64> %ab, <4 x i64> %ba
   ret <4 x i64> %sel
 }
 

diff  --git a/llvm/test/CodeGen/X86/abds-vector-512.ll b/llvm/test/CodeGen/X86/abds-vector-512.ll
index b21cc31b9d23..65daad55c5cd 100644
--- a/llvm/test/CodeGen/X86/abds-vector-512.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-512.ll
@@ -345,14 +345,14 @@ define <8 x i64> @abd_cmp_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
 ; AVX512-LABEL: abd_cmp_v8i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltq %zmm1, %zmm0, %k1
-; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vpsubq %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
+; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sge <8 x i64> %a, %b
   %ab = sub <8 x i64> %a, %b
   %ba = sub <8 x i64> %b, %a
-  %sel = select <8 x i1> %cmp, <8 x i64> %ba, <8 x i64> %ab
+  %sel = select <8 x i1> %cmp, <8 x i64> %ab, <8 x i64> %ba
   ret <8 x i64> %sel
 }
 

diff  --git a/llvm/test/CodeGen/X86/abdu-vector-128.ll b/llvm/test/CodeGen/X86/abdu-vector-128.ll
index a2a683124912..910fe49d33a0 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-128.ll
@@ -843,9 +843,9 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    psubq %xmm1, %xmm3
 ; SSE2-NEXT:    psubq %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm2, %xmm3
-; SSE2-NEXT:    pandn %xmm1, %xmm2
-; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pandn %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm1, %xmm2
 ; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
@@ -860,8 +860,8 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE42-NEXT:    psubq %xmm1, %xmm3
 ; SSE42-NEXT:    psubq %xmm0, %xmm1
 ; SSE42-NEXT:    movdqa %xmm2, %xmm0
-; SSE42-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
-; SSE42-NEXT:    movapd %xmm1, %xmm0
+; SSE42-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE42-NEXT:    movapd %xmm3, %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: abd_cmp_v2i64:
@@ -872,7 +872,7 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm3
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: abd_cmp_v2i64:
@@ -883,20 +883,20 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm3
 ; AVX2-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vblendvpd %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: abd_cmp_v2i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltuq %xmm1, %xmm0, %k1
-; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpsubq %xmm0, %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpsubq %xmm0, %xmm1, %xmm2
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm2 {%k1}
 ; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
 ; AVX512-NEXT:    retq
   %cmp = icmp uge <2 x i64> %a, %b
   %ab = sub <2 x i64> %a, %b
   %ba = sub <2 x i64> %b, %a
-  %sel = select <2 x i1> %cmp, <2 x i64> %ba, <2 x i64> %ab
+  %sel = select <2 x i1> %cmp, <2 x i64> %ab, <2 x i64> %ba
   ret <2 x i64> %sel
 }
 

diff  --git a/llvm/test/CodeGen/X86/abdu-vector-256.ll b/llvm/test/CodeGen/X86/abdu-vector-256.ll
index d4e9d8acc97f..be6c7442bf0a 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-256.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-256.ll
@@ -606,9 +606,9 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm5
 ; AVX1-NEXT:    vpsubq %xmm6, %xmm4, %xmm7
 ; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vblendvpd %xmm3, %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpsubq %xmm4, %xmm6, %xmm1
-; AVX1-NEXT:    vblendvpd %xmm2, %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm7, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -620,20 +620,20 @@ define <4 x i64> @abd_cmp_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm3
 ; AVX2-NEXT:    vpsubq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    vblendvpd %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm3, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: abd_cmp_v4i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltuq %ymm1, %ymm0, %k1
-; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm2
-; AVX512-NEXT:    vpsubq %ymm0, %ymm1, %ymm2 {%k1}
+; AVX512-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
+; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm2 {%k1}
 ; AVX512-NEXT:    vmovdqa %ymm2, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp uge <4 x i64> %a, %b
   %ab = sub <4 x i64> %a, %b
   %ba = sub <4 x i64> %b, %a
-  %sel = select <4 x i1> %cmp, <4 x i64> %ba, <4 x i64> %ab
+  %sel = select <4 x i1> %cmp, <4 x i64> %ab, <4 x i64> %ba
   ret <4 x i64> %sel
 }
 

diff  --git a/llvm/test/CodeGen/X86/abdu-vector-512.ll b/llvm/test/CodeGen/X86/abdu-vector-512.ll
index d10fc73c9946..19af5d3694f4 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-512.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-512.ll
@@ -349,14 +349,14 @@ define <8 x i64> @abd_cmp_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
 ; AVX512-LABEL: abd_cmp_v8i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpnltuq %zmm1, %zmm0, %k1
-; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vpsubq %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512-NEXT:    vpsubq %zmm0, %zmm1, %zmm2
+; AVX512-NEXT:    vpsubq %zmm1, %zmm0, %zmm2 {%k1}
 ; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm0
 ; AVX512-NEXT:    retq
   %cmp = icmp uge <8 x i64> %a, %b
   %ab = sub <8 x i64> %a, %b
   %ba = sub <8 x i64> %b, %a
-  %sel = select <8 x i1> %cmp, <8 x i64> %ba, <8 x i64> %ab
+  %sel = select <8 x i1> %cmp, <8 x i64> %ab, <8 x i64> %ba
   ret <8 x i64> %sel
 }
 


        


More information about the llvm-commits mailing list