[llvm] r271826 - [AVX512] Fix PANDN combining for v4i32/v8i32 when VLX is enabled.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 4 22:35:11 PDT 2016


Author: ctopper
Date: Sun Jun  5 00:35:11 2016
New Revision: 271826

URL: http://llvm.org/viewvc/llvm-project?rev=271826&view=rev
Log:
[AVX512] Fix PANDN combining for v4i32/v8i32 when VLX is enabled.

v4i32/v8i32 ANDs aren't promoted to v2i64/v4i64 when VLX is enabled, so the combine has to accept those types directly or the ANDN pattern goes unmatched.
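
For context, the combine touched below rewrites (and (xor X, all-ones), Y) into an X86ISD::ANDNP node so a single vpandn* instruction is selected. A minimal sketch of the IR shape it matches (@andn_shape is a hypothetical name, not part of this commit):

  define <8 x i32> @andn_shape(<8 x i32> %x, <8 x i32> %y) {
    %not = xor <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
    %r = and <8 x i32> %not, %y   ; should now select vpandnd under VLX
    ret <8 x i32> %r
  }

Before this change the type check rejected v4i32/v8i32 outright, so under VLX the xor/and pair was emitted as separate instructions instead of one.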

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=271826&r1=271825&r2=271826&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Jun  5 00:35:11 2016
@@ -27117,7 +27117,8 @@ static SDValue combineANDXORWithAllOnesI
   SDValue N1 = N->getOperand(1);
   SDLoc DL(N);
 
-  if (VT != MVT::v2i64 && VT != MVT::v4i64)
+  if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+      VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
     return SDValue();
 
   // Canonicalize XOR to the left.

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll?rev=271826&r1=271825&r2=271826&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll Sun Jun  5 00:35:11 2016
@@ -13,6 +13,18 @@ entry:
   ret <8 x i32> %x
 }
 
+; CHECK-LABEL: vpandnd256
+; CHECK: vpandnd %ymm
+; CHECK: ret
+define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %b2 = xor <8 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %x = and <8 x i32> %a2, %b2
+  ret <8 x i32> %x
+}
+
 ; CHECK-LABEL: vpord256
 ; CHECK: vpord %ymm
 ; CHECK: ret
@@ -46,6 +58,18 @@ entry:
   ret <4 x i64> %x
 }
 
+; CHECK-LABEL: vpandnq256
+; CHECK: vpandnq %ymm
+; CHECK: ret
+define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+  %b2 = xor <4 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %x = and <4 x i64> %a2, %b2
+  ret <4 x i64> %x
+}
+
 ; CHECK-LABEL: vporq256
 ; CHECK: vporq %ymm
 ; CHECK: ret
@@ -81,6 +105,18 @@ entry:
   ret <4 x i32> %x
 }
 
+; CHECK-LABEL: vpandnd128
+; CHECK: vpandnd %xmm
+; CHECK: ret
+define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+  %b2 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %x = and <4 x i32> %a2, %b2
+  ret <4 x i32> %x
+}
+
 ; CHECK-LABEL: vpord128
 ; CHECK: vpord %xmm
 ; CHECK: ret
@@ -114,6 +150,18 @@ entry:
   ret <2 x i64> %x
 }
 
+; CHECK-LABEL: vpandnq128
+; CHECK: vpandnq %xmm
+; CHECK: ret
+define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
+entry:
+  ; Force the execution domain with an add.
+  %a2 = add <2 x i64> %a, <i64 1, i64 1>
+  %b2 = xor <2 x i64> %b, <i64 -1, i64 -1>
+  %x = and <2 x i64> %a2, %b2
+  ret <2 x i64> %x
+}
+
 ; CHECK-LABEL: vporq128
 ; CHECK: vporq %xmm
 ; CHECK: ret
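
To check the new patterns locally, one typical invocation (the test's actual RUN line is not shown in this diff, so the exact flags here are an assumption) is:

  llc < llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll -mtriple=x86_64-unknown-unknown -mattr=+avx512vl \
    | FileCheck llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll

or run the test through lit from a build directory, e.g. ./bin/llvm-lit test/CodeGen/X86/avx512vl-logic.ll.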
