[llvm-branch-commits] [llvm] 0f59d09 - [X86][AVX] combineVectorSignBitsTruncation - limit AVX512 truncations to 128-bits (PR48727)

Simon Pilgrim via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Jan 13 02:51:42 PST 2021


Author: Simon Pilgrim
Date: 2021-01-13T10:38:23Z
New Revision: 0f59d099571d3d803b54e2ce06aa94babb9b26db

URL: https://github.com/llvm/llvm-project/commit/0f59d099571d3d803b54e2ce06aa94babb9b26db
DIFF: https://github.com/llvm/llvm-project/commit/0f59d099571d3d803b54e2ce06aa94babb9b26db.diff

LOG: [X86][AVX] combineVectorSignBitsTruncation - limit AVX512 truncations to 128-bits (PR48727)

rG73a44f437bf1 resulted in 256-bit packss/packus ops with additional shuffles that shuffle combining can sometimes try to convert back into a truncation.
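
For reference, a minimal standalone sketch of the affected pattern, adapted
from trunc_concat_packsswb_256 in the vector-pack-256.ll diff below (the
trailing trunc/ret lines are inferred from the function's return type and are
not part of the visible hunk): two 256-bit sources with known sign/zero bits
are interleaved per 128-bit lane and then truncated, which is the shape this
combine matches as a PACK.

; Sketch of the pack-as-truncation pattern (per-128-bit-lane interleave + trunc).
define <32 x i8> @trunc_concat_packsswb_256(<16 x i16> %a0, <16 x i16> %a1) nounwind {
  %1 = ashr <16 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = and  <16 x i16> %a1, <i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1>
  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %4 = trunc <32 x i16> %3 to <32 x i8>
  ret <32 x i8> %4
}

Before this patch, AVX512 targets lowered this to a single 256-bit
vpacksswb; afterwards only 128-bit results take the PACK path, so the
256-bit case falls back to the longer vpmovwb/vpmovdb-based sequences shown
in the updated checks, avoiding the pack-plus-shuffle output that shuffle
combining could fold back into a truncation.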

Added: 
    llvm/test/CodeGen/X86/pr48727.ll

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-pack-256.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 65b784f31842..5949782f3c0c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45957,11 +45957,11 @@ static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
   if (Subtarget.hasAVX512() &&
       !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
         InVT.is512BitVector())) {
-    // PACK should still be worth it for 128/256-bit vectors if the sources were
+    // PACK should still be worth it for 128-bit vectors if the sources were
     // originally concatenated from subvectors.
     SmallVector<SDValue> ConcatOps;
-    if (VT.getSizeInBits() > 256 || !collectConcatOps(In.getNode(), ConcatOps))
-      return SDValue();
+    if (VT.getSizeInBits() > 128 || !collectConcatOps(In.getNode(), ConcatOps))
+      return SDValue();
   }
 
   unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);

diff --git a/llvm/test/CodeGen/X86/pr48727.ll b/llvm/test/CodeGen/X86/pr48727.ll
new file mode 100644
index 000000000000..4fa16db14acc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr48727.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=skx | FileCheck %s
+
+define void @PR48727() {
+; CHECK-LABEL: PR48727:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vcvttpd2dqy 0, %xmm0
+; CHECK-NEXT:    vcvttpd2dqy 128, %xmm1
+; CHECK-NEXT:    movq (%rax), %rax
+; CHECK-NEXT:    vcvttpd2dqy 160, %xmm2
+; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; CHECK-NEXT:    vcvttpd2dqy (%rax), %xmm2
+; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpmovdw %zmm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 16(%rax)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+entry:
+  %0 = load [100 x [100 x i16]]*, [100 x [100 x i16]]** undef, align 8
+  %wide.load.2 = load <4 x double>, <4 x double>* null, align 16
+  %1 = fptosi <4 x double> %wide.load.2 to <4 x i16>
+  %2 = getelementptr inbounds [100 x [100 x i16]], [100 x [100 x i16]]* %0, i64 0, i64 0, i64 8
+  %3 = bitcast i16* %2 to <4 x i16>*
+  store <4 x i16> %1, <4 x i16>* %3, align 8
+  %wide.load.3 = load <4 x double>, <4 x double>* undef, align 16, !invariant.load !0, !noalias !1
+  %4 = fptosi <4 x double> %wide.load.3 to <4 x i16>
+  %5 = getelementptr inbounds [100 x [100 x i16]], [100 x [100 x i16]]* %0, i64 0, i64 0, i64 12
+  %6 = bitcast i16* %5 to <4 x i16>*
+  store <4 x i16> %4, <4 x i16>* %6, align 8
+  %7 = getelementptr inbounds [100 x [100 x double]], [100 x [100 x double]]* null, i64 0, i64 0, i64 16
+  %8 = bitcast double* %7 to <4 x double>*
+  %wide.load.4 = load <4 x double>, <4 x double>* %8, align 16, !invariant.load !0, !noalias !1
+  %9 = fptosi <4 x double> %wide.load.4 to <4 x i16>
+  %10 = getelementptr inbounds [100 x [100 x i16]], [100 x [100 x i16]]* %0, i64 0, i64 0, i64 16
+  %11 = bitcast i16* %10 to <4 x i16>*
+  store <4 x i16> %9, <4 x i16>* %11, align 8
+  %12 = getelementptr inbounds [100 x [100 x double]], [100 x [100 x double]]* null, i64 0, i64 0, i64 20
+  %13 = bitcast double* %12 to <4 x double>*
+  %wide.load.5 = load <4 x double>, <4 x double>* %13, align 16, !invariant.load !0, !noalias !1
+  %14 = fptosi <4 x double> %wide.load.5 to <4 x i16>
+  %15 = getelementptr inbounds [100 x [100 x i16]], [100 x [100 x i16]]* %0, i64 0, i64 0, i64 20
+  %16 = bitcast i16* %15 to <4 x i16>*
+  store <4 x i16> %14, <4 x i16>* %16, align 8
+  ret void
+}
+
+!0 = !{}
+!1 = !{!2}
+!2 = !{!"buffer: {index:1, offset:0, size:20000}", !3}
+!3 = !{!"XLA global AA domain"}

diff --git a/llvm/test/CodeGen/X86/vector-pack-256.ll b/llvm/test/CodeGen/X86/vector-pack-256.ll
index af06ddbd3f3a..b789b46906cb 100644
--- a/llvm/test/CodeGen/X86/vector-pack-256.ll
+++ b/llvm/test/CodeGen/X86/vector-pack-256.ll
@@ -31,7 +31,10 @@ define <16 x i16> @trunc_concat_packssdw_256(<8 x i32> %a0, <8 x i32> %a1) nounw
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsrad $17, %ymm0, %ymm0
 ; AVX512-NEXT:    vpsrad $23, %ymm1, %ymm1
-; AVX512-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    retq
   %1 = ashr <8 x i32> %a0, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
   %2 = ashr <8 x i32> %a1, <i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23>
@@ -65,7 +68,10 @@ define <16 x i16> @trunc_concat_packusdw_256(<8 x i32> %a0, <8 x i32> %a1) nounw
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsrld $17, %ymm0, %ymm0
 ; AVX512-NEXT:    vpandd {{.*}}(%rip){1to8}, %ymm1, %ymm1
-; AVX512-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
 ; AVX512-NEXT:    retq
   %1 = lshr <8 x i32> %a0, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
   %2 = and  <8 x i32> %a1, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
@@ -94,12 +100,28 @@ define <32 x i8> @trunc_concat_packsswb_256(<16 x i16> %a0, <16 x i16> %a1) noun
 ; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_concat_packsswb_256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_concat_packsswb_256:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_concat_packsswb_256:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsraw $15, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
   %1 = ashr <16 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %2 = and  <16 x i16> %a1, <i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1>
   %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -127,12 +149,28 @@ define <32 x i8> @trunc_concat_packuswb_256(<16 x i16> %a0, <16 x i16> %a1) noun
 ; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: trunc_concat_packuswb_256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrlw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_concat_packuswb_256:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpsrlw $15, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_concat_packuswb_256:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpsrlw $15, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
   %1 = lshr <16 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %2 = and  <16 x i16> %a1, <i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1, i16  1>
   %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>

More information about the llvm-branch-commits mailing list