[llvm] r281012 - [AVX-512] Add more integer vector comparison tests with loads. Some of these show opportunities where we can commute the operands to fold the loads.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 8 18:36:04 PDT 2016


Author: ctopper
Date: Thu Sep  8 20:36:04 2016
New Revision: 281012

URL: http://llvm.org/viewvc/llvm-project?rev=281012&view=rev
Log:
[AVX-512] Add more integer vector comparison tests with loads. Some of these show opportunities where we can commute the operands to fold the loads.

The commutes will be added in a follow-up commit.
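To illustrate the opportunity, the new test256_17/test256_18 pair below compares the same values in both operand orders. icmp ne is commutative, so the two forms are equivalent, but x86 can fold the load into only one of the compare's source positions, and today only the first form folds:

    ; icmp ne <8 x i32> %x, %y -- the load folds into the compare:
    vpcmpneqd (%rdi), %ymm0, %k1

    ; icmp ne <8 x i32> %y, %x -- currently emits a separate load that
    ; commuting the compare would eliminate:
    vmovdqu32 (%rdi), %ymm2
    vpcmpneqd %ymm0, %ymm2, %k1

For the ordered predicates the commute also has to swap the predicate; e.g. the vpcmpnltud (uge) cases in test256_8b and test256_20 would presumably become folded vpcmpleud compares.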

Modified:
    llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll?rev=281012&r1=281011&r2=281012&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll Thu Sep  8 20:36:04 2016
@@ -57,6 +57,18 @@ define <8 x i32> @test256_5(<8 x i32> %x
   ret <8 x i32> %max
 }
 
+define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
+; CHECK-LABEL: test256_5b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %yp, align 4
+  %mask = icmp eq <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
 define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test256_6:
 ; CHECK:       ## BB#0:
@@ -69,6 +81,18 @@ define <8 x i32> @test256_6(<8 x i32> %x
   ret <8 x i32> %max
 }
 
+define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test256_6b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpgtd (%rdi), %ymm0, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
+  %mask = icmp slt <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
 define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test256_7:
 ; CHECK:       ## BB#0:
@@ -81,6 +105,18 @@ define <8 x i32> @test256_7(<8 x i32> %x
   ret <8 x i32> %max
 }
 
+define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test256_7b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpled (%rdi), %ymm0, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
+  %mask = icmp sge <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
 define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test256_8:
 ; CHECK:       ## BB#0:
@@ -93,6 +129,19 @@ define <8 x i32> @test256_8(<8 x i32> %x
   ret <8 x i32> %max
 }
 
+define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test256_8b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %ymm2
+; CHECK-NEXT:    vpcmpnltud %ymm0, %ymm2, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
+  %mask = icmp uge <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
 define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32> %y1) nounwind {
 ; CHECK-LABEL: test256_9:
 ; CHECK:       ## BB#0:
@@ -213,6 +262,56 @@ define <4 x i64> @test256_16(<4 x i64> %
   ret <4 x i64> %max
 }
 
+define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
+; CHECK-LABEL: test256_17:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpneqd (%rdi), %ymm0, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %yp, align 4
+  %mask = icmp ne <8 x i32> %x, %y
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
+define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
+; CHECK-LABEL: test256_18:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %ymm2
+; CHECK-NEXT:    vpcmpneqd %ymm0, %ymm2, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %yp, align 4
+  %mask = icmp ne <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
+define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
+; CHECK-LABEL: test256_19:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpnltud (%rdi), %ymm0, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %yp, align 4
+  %mask = icmp uge <8 x i32> %x, %y
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
+define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
+; CHECK-LABEL: test256_20:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %ymm2
+; CHECK-NEXT:    vpcmpnltud %ymm0, %ymm2, %k1
+; CHECK-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <8 x i32>, <8 x i32>* %yp, align 4
+  %mask = icmp uge <8 x i32> %y, %x
+  %max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
+  ret <8 x i32> %max
+}
+
 define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
 ; CHECK-LABEL: test128_1:
 ; CHECK:       ## BB#0:
@@ -269,6 +368,18 @@ define <4 x i32> @test128_5(<4 x i32> %x
   ret <4 x i32> %max
 }
 
+define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
+; CHECK-LABEL: test128_5b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpeqd (%rdi), %xmm0, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %yp, align 4
+  %mask = icmp eq <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
 define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test128_6:
 ; CHECK:       ## BB#0:
@@ -281,6 +392,18 @@ define <4 x i32> @test128_6(<4 x i32> %x
   ret <4 x i32> %max
 }
 
+define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_6b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpgtd (%rdi), %xmm0, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp slt <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
 define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test128_7:
 ; CHECK:       ## BB#0:
@@ -293,6 +416,18 @@ define <4 x i32> @test128_7(<4 x i32> %x
   ret <4 x i32> %max
 }
 
+define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_7b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpled (%rdi), %xmm0, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp sge <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
 define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
 ; CHECK-LABEL: test128_8:
 ; CHECK:       ## BB#0:
@@ -305,6 +440,19 @@ define <4 x i32> @test128_8(<4 x i32> %x
   ret <4 x i32> %max
 }
 
+define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_8b:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %xmm2
+; CHECK-NEXT:    vpcmpnltud %xmm0, %xmm2, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp uge <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
 define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32> %y1) nounwind {
 ; CHECK-LABEL: test128_9:
 ; CHECK:       ## BB#0:
@@ -424,3 +572,53 @@ define <2 x i64> @test128_16(<2 x i64> %
   %max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %x1
   ret <2 x i64> %max
 }
+
+define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_17:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpneqd (%rdi), %xmm0, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp ne <4 x i32> %x, %y
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
+define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_18:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %xmm2
+; CHECK-NEXT:    vpcmpneqd %xmm0, %xmm2, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp ne <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
+define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_19:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpcmpnltud (%rdi), %xmm0, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp uge <4 x i32> %x, %y
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}
+
+define <4 x i32> @test128_20(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
+; CHECK-LABEL: test128_20:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqu32 (%rdi), %xmm2
+; CHECK-NEXT:    vpcmpnltud %xmm0, %xmm2, %k1
+; CHECK-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    retq
+  %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
+  %mask = icmp uge <4 x i32> %y, %x
+  %max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
+  ret <4 x i32> %max
+}