[llvm] r275046 - [AVX512] Add support for 512-bit ANDN now that all-ones build vectors survive long enough to be matched.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jul 10 22:36:53 PDT 2016


Author: ctopper
Date: Mon Jul 11 00:36:53 2016
New Revision: 275046

URL: http://llvm.org/viewvc/llvm-project?rev=275046&view=rev
Log:
[AVX512] Add support for 512-bit ANDN now that all-ones build vectors survive long enough to be matched.
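
The combine being extended here folds an AND whose operand is an XOR with an
all-ones build vector into the X86 andnot node (ANDNP), which selects to
vpandnd/vpandnq. Below is a minimal sketch of the 512-bit pattern that now
matches; the function name, RUN line, and triple are assumptions following the
conventions of the test file modified below, and KNL is used so the
integer-domain instruction is the only available 512-bit form:

  ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
  define <8 x i64> @andn_sketch(<8 x i64> %x, <8 x i64> %y) {
  ; CHECK-LABEL: andn_sketch:
  ; CHECK: vpandnq %zmm0, %zmm1, %zmm0
    %not = xor <8 x i64> %y, <i64 -1, i64 -1, i64 -1, i64 -1,
                              i64 -1, i64 -1, i64 -1, i64 -1>
    %r = and <8 x i64> %not, %x
    ret <8 x i64> %r
  }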

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx512-logic.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=275046&r1=275045&r2=275046&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jul 11 00:36:53 2016
@@ -27882,6 +27882,7 @@ static SDValue combineANDXORWithAllOnesI
   SDLoc DL(N);
 
   if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+      VT != MVT::v8i64 && VT != MVT::v16i32 &&
       VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
     return SDValue();
 
@@ -27897,7 +27898,7 @@ static SDValue combineANDXORWithAllOnesI
 
   N01 = peekThroughBitcasts(N01);
 
-  // Either match a direct AllOnes for 128 and 256-bit vectors, or an
+  // Either match a direct AllOnes for 128, 256, and 512-bit vectors, or an
   // insert_subvector building a 256-bit AllOnes vector.
   if (!ISD::isBuildVectorAllOnes(N01.getNode())) {
     if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR)

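For reference: with this change the early type guard admits v8i64 and v16i32,
which are legal with plain AVX-512F, alongside the 128- and 256-bit types that
need VLX, while the insert_subvector fallback after the AllOnes check remains
specific to 256-bit vectors. A hypothetical dword-sized sketch of what the
widened guard accepts (the expected output is hedged, since the
execution-domain fixing pass may pick an equivalent encoding):

  define <16 x i32> @andn_dwords_sketch(<16 x i32> %x, <16 x i32> %y) {
  ; Expected to select a single andnot, e.g. vpandnd %zmm0, %zmm1, %zmm0
    %not = xor <16 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1,
                               i32 -1, i32 -1, i32 -1, i32 -1,
                               i32 -1, i32 -1, i32 -1, i32 -1,
                               i32 -1, i32 -1, i32 -1, i32 -1>
    %r = and <16 x i32> %not, %x
    ret <16 x i32> %r
  }
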
Modified: llvm/trunk/test/CodeGen/X86/avx512-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-logic.ll?rev=275046&r1=275045&r2=275046&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-logic.ll Mon Jul 11 00:36:53 2016
@@ -17,6 +17,22 @@ entry:
   ret <16 x i32> %x
 }
 
+define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
+; ALL-LABEL: vpandnd:
+; ALL:       ## BB#0: ## %entry
+; ALL-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; ALL-NEXT:    vpandnd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
+                            i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %b2 = xor <16 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1,
+                            i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %x = and <16 x i32> %a2, %b2
+  ret <16 x i32> %x
+}
+
 define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vpord:
 ; ALL:       ## BB#0: ## %entry
@@ -58,6 +74,20 @@ entry:
   ret <8 x i64> %x
 }
 
+define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
+; ALL-LABEL: vpandnq:
+; ALL:       ## BB#0: ## %entry
+; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; ALL-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %b2 = xor <8 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
+  %x = and <8 x i64> %a2, %b2
+  ret <8 x i64> %x
+}
+
 define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ssp {
 ; ALL-LABEL: vporq:
 ; ALL:       ## BB#0: ## %entry
@@ -133,6 +163,25 @@ define <64 x i8> @and_v64i8(<64 x i8> %a
   ret <64 x i8> %res
 }
 
+define <64 x i8> @andn_v64i8(<64 x i8> %a, <64 x i8> %b) {
+; KNL-LABEL: andn_v64i8:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; KNL-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: andn_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; SKX-NEXT:    retq
+  %b2 = xor <64 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                           i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                           i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                           i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %res = and <64 x i8> %a, %b2
+  ret <64 x i8> %res
+}
+
 define <64 x i8> @or_v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; KNL-LABEL: or_v64i8:
 ; KNL:       ## BB#0:
@@ -178,6 +227,23 @@ define <32 x i16> @and_v32i16(<32 x i16>
   ret <32 x i16> %res
 }
 
+define <32 x i16> @andn_v32i16(<32 x i16> %a, <32 x i16> %b) {
+; KNL-LABEL: andn_v32i16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; KNL-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: andn_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
+; SKX-NEXT:    retq
+  %b2 = xor <32 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1,
+                            i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %res = and <32 x i16> %a, %b2
+  ret <32 x i16> %res
+}
+
 define <32 x i16> @or_v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; KNL-LABEL: or_v32i16:
 ; KNL:       ## BB#0:

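For completeness: the RUN lines of avx512-logic.ll sit outside the hunks shown
above. Judging from the ALL/KNL/SKX check prefixes and the Darwin-style "##"
assembly comments in the expected output, the file is presumably driven by two
configurations along these assumed lines:

  ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=ALL --check-prefix=KNL
  ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL --check-prefix=SKX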