[llvm] ceadd98 - [X86][AVX] lowerBuildVectorAsBroadcast - improve i64 BROADCASTM lowering on 32-bit targets

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 18 09:00:14 PDT 2020


Author: Simon Pilgrim
Date: 2020-09-18T16:59:57+01:00
New Revision: ceadd98c2fd51ab1faa80d142b2a6e080117e5ae

URL: https://github.com/llvm/llvm-project/commit/ceadd98c2fd51ab1faa80d142b2a6e080117e5ae
DIFF: https://github.com/llvm/llvm-project/commit/ceadd98c2fd51ab1faa80d142b2a6e080117e5ae.diff

LOG: [X86][AVX] lowerBuildVectorAsBroadcast - improve i64 BROADCASTM lowering on 32-bit targets

We already handle the cases where we have a 'zero extended splat' build vector (a, 0, 0, 0, a, 0, 0, 0, ...), but we were missing the case where the 'a' scalar itself is zero-extended as well - such as i64 -> vXi64 splats on 32-bit targets.
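For reference, a reduced standalone IR example of the pattern this now catches (hypothetical - modelled on the broadcastm fast-isel tests updated below, not code from this patch). Compiled with something like 'llc -mtriple=i686-unknown -mattr=+avx512cd', the zero-extended mask splat should now select vpbroadcastmb2q instead of the old kmovw + movzbl + vmovd + vpbroadcastq sequence:

    ; Hypothetical reduced example (function name is illustrative only).
    define <8 x i64> @broadcastmb_splat(<8 x i64> %a, <8 x i64> %b) {
    entry:
      ; Compare to produce an <8 x i1> mask and reinterpret it as an i8 scalar.
      %cmp  = icmp eq <8 x i64> %a, %b
      %mask = bitcast <8 x i1> %cmp to i8
      ; Zero-extend the mask scalar and splat it across all 8 x i64 lanes.
      ; On 32-bit targets the i64 zext is what used to block the BROADCASTM match.
      %ext   = zext i8 %mask to i64
      %ins   = insertelement <8 x i64> undef, i64 %ext, i32 0
      %splat = shufflevector <8 x i64> %ins, <8 x i64> undef, <8 x i32> zeroinitializer
      ret <8 x i64> %splat
    }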

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
    llvm/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
    llvm/test/CodeGen/X86/broadcastm-lowering.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2480e395e0a4..948197d246e6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8655,10 +8655,14 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
     SDValue BOperand;
     SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
     if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
+        (ZeroExtended && ZeroExtended.getOpcode() == ISD::ZERO_EXTEND &&
+         ZeroExtended.getOperand(0).getOpcode() == ISD::BITCAST) ||
         (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
          Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
-      if (ZeroExtended)
+      if (ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST)
         BOperand = ZeroExtended.getOperand(0);
+      else if (ZeroExtended)
+        BOperand = ZeroExtended.getOperand(0).getOperand(0);
       else
         BOperand = Ld.getOperand(0).getOperand(0);
       MVT MaskVT = BOperand.getSimpleValueType();

diff --git a/llvm/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
index a46b7316b3d4..2f80fbfc7e62 100644
--- a/llvm/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512cd-intrinsics-fast-isel.ll
@@ -3,20 +3,11 @@
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown -mattr=+avx512cd | FileCheck %s --check-prefixes=CHECK,X64
 
 define <8 x i64> @test_mm512_broadcastmb_epi64(<8 x i64> %a, <8 x i64> %b) {
-; X86-LABEL: test_mm512_broadcastmb_epi64:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
-; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    vmovd %eax, %xmm0
-; X86-NEXT:    vpbroadcastq %xmm0, %zmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_mm512_broadcastmb_epi64:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
-; X64-NEXT:    vpbroadcastmb2q %k0, %zmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm512_broadcastmb_epi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT:    vpbroadcastmb2q %k0, %zmm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <8 x i64> %a, %b
   %1 = bitcast <8 x i1> %0 to i8

diff --git a/llvm/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
index a9fb97f705c3..4163a37afa19 100644
--- a/llvm/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vlcd-intrinsics-fast-isel.ll
@@ -3,19 +3,11 @@
 ; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512cd,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64
 
 define <2 x i64> @test_mm_broadcastmb_epi64(<2 x i64> %a, <2 x i64> %b) {
-; X86-LABEL: test_mm_broadcastmb_epi64:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
-; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    vmovd %eax, %xmm0
-; X86-NEXT:    vpbroadcastq %xmm0, %xmm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_mm_broadcastmb_epi64:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
-; X64-NEXT:    vpbroadcastmb2q %k0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm_broadcastmb_epi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT:    vpbroadcastmb2q %k0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %a to <4 x i32>
   %1 = bitcast <2 x i64> %b to <4 x i32>
@@ -29,19 +21,11 @@ entry:
 }
 
 define <4 x i64> @test_mm256_broadcastmb_epi64(<4 x i64> %a, <4 x i64> %b) {
-; X86-LABEL: test_mm256_broadcastmb_epi64:
-; X86:       # %bb.0: # %entry
-; X86-NEXT:    vpcmpeqq %ymm1, %ymm0, %k0
-; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    vmovd %eax, %xmm0
-; X86-NEXT:    vpbroadcastq %xmm0, %ymm0
-; X86-NEXT:    retl
-;
-; X64-LABEL: test_mm256_broadcastmb_epi64:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqq %ymm1, %ymm0, %k0
-; X64-NEXT:    vpbroadcastmb2q %k0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_mm256_broadcastmb_epi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT:    vpbroadcastmb2q %k0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <4 x i64> %a, %b
   %1 = shufflevector <4 x i1> %0, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

diff --git a/llvm/test/CodeGen/X86/broadcastm-lowering.ll b/llvm/test/CodeGen/X86/broadcastm-lowering.ll
index 3dfbdf181ef4..91ac96185db9 100644
--- a/llvm/test/CodeGen/X86/broadcastm-lowering.ll
+++ b/llvm/test/CodeGen/X86/broadcastm-lowering.ll
@@ -29,20 +29,11 @@ define <2 x i64> @test_mm_epi64(<8 x i16> %a, <8 x i16> %b) {
 ; X86-AVX512CD-NEXT:    vzeroupper
 ; X86-AVX512CD-NEXT:    retl
 ;
-; X64-AVX512VLCDBW-LABEL: test_mm_epi64:
-; X64-AVX512VLCDBW:       # %bb.0: # %entry
-; X64-AVX512VLCDBW-NEXT:    vpcmpeqw %xmm1, %xmm0, %k0
-; X64-AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %xmm0
-; X64-AVX512VLCDBW-NEXT:    retq
-;
-; X86-AVX512VLCDBW-LABEL: test_mm_epi64:
-; X86-AVX512VLCDBW:       # %bb.0: # %entry
-; X86-AVX512VLCDBW-NEXT:    vpcmpeqw %xmm1, %xmm0, %k0
-; X86-AVX512VLCDBW-NEXT:    kmovd %k0, %eax
-; X86-AVX512VLCDBW-NEXT:    movzbl %al, %eax
-; X86-AVX512VLCDBW-NEXT:    vmovd %eax, %xmm0
-; X86-AVX512VLCDBW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
-; X86-AVX512VLCDBW-NEXT:    retl
+; AVX512VLCDBW-LABEL: test_mm_epi64:
+; AVX512VLCDBW:       # %bb.0: # %entry
+; AVX512VLCDBW-NEXT:    vpcmpeqw %xmm1, %xmm0, %k0
+; AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %xmm0
+; AVX512VLCDBW-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <8 x i16> %a, %b
   %1 = bitcast <8 x i1> %0 to i8
@@ -91,39 +82,19 @@ entry:
 }
 
 define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
-; X64-AVX512CD-LABEL: test_mm512_epi64:
-; X64-AVX512CD:       # %bb.0: # %entry
-; X64-AVX512CD-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X64-AVX512CD-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; X64-AVX512CD-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; X64-AVX512CD-NEXT:    vpbroadcastmb2q %k0, %zmm0
-; X64-AVX512CD-NEXT:    retq
-;
-; X86-AVX512CD-LABEL: test_mm512_epi64:
-; X86-AVX512CD:       # %bb.0: # %entry
-; X86-AVX512CD-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X86-AVX512CD-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; X86-AVX512CD-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
-; X86-AVX512CD-NEXT:    kmovw %k0, %eax
-; X86-AVX512CD-NEXT:    movzbl %al, %eax
-; X86-AVX512CD-NEXT:    vmovd %eax, %xmm0
-; X86-AVX512CD-NEXT:    vpbroadcastq %xmm0, %zmm0
-; X86-AVX512CD-NEXT:    retl
-;
-; X64-AVX512VLCDBW-LABEL: test_mm512_epi64:
-; X64-AVX512VLCDBW:       # %bb.0: # %entry
-; X64-AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
-; X64-AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %zmm0
-; X64-AVX512VLCDBW-NEXT:    retq
+; AVX512CD-LABEL: test_mm512_epi64:
+; AVX512CD:       # %bb.0: # %entry
+; AVX512CD-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512CD-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512CD-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512CD-NEXT:    vpbroadcastmb2q %k0, %zmm0
+; AVX512CD-NEXT:    ret{{[l|q]}}
 ;
-; X86-AVX512VLCDBW-LABEL: test_mm512_epi64:
-; X86-AVX512VLCDBW:       # %bb.0: # %entry
-; X86-AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
-; X86-AVX512VLCDBW-NEXT:    kmovd %k0, %eax
-; X86-AVX512VLCDBW-NEXT:    movzbl %al, %eax
-; X86-AVX512VLCDBW-NEXT:    vmovd %eax, %xmm0
-; X86-AVX512VLCDBW-NEXT:    vpbroadcastq %xmm0, %zmm0
-; X86-AVX512VLCDBW-NEXT:    retl
+; AVX512VLCDBW-LABEL: test_mm512_epi64:
+; AVX512VLCDBW:       # %bb.0: # %entry
+; AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
+; AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %zmm0
+; AVX512VLCDBW-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <8 x i32> %a, %b
   %1 = bitcast <8 x i1> %0 to i8
@@ -156,20 +127,11 @@ define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
 ; X86-AVX512CD-NEXT:    vpbroadcastq %xmm0, %ymm0
 ; X86-AVX512CD-NEXT:    retl
 ;
-; X64-AVX512VLCDBW-LABEL: test_mm256_epi64:
-; X64-AVX512VLCDBW:       # %bb.0: # %entry
-; X64-AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
-; X64-AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %ymm0
-; X64-AVX512VLCDBW-NEXT:    retq
-;
-; X86-AVX512VLCDBW-LABEL: test_mm256_epi64:
-; X86-AVX512VLCDBW:       # %bb.0: # %entry
-; X86-AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
-; X86-AVX512VLCDBW-NEXT:    kmovd %k0, %eax
-; X86-AVX512VLCDBW-NEXT:    movzbl %al, %eax
-; X86-AVX512VLCDBW-NEXT:    vmovd %eax, %xmm0
-; X86-AVX512VLCDBW-NEXT:    vpbroadcastq %xmm0, %ymm0
-; X86-AVX512VLCDBW-NEXT:    retl
+; AVX512VLCDBW-LABEL: test_mm256_epi64:
+; AVX512VLCDBW:       # %bb.0: # %entry
+; AVX512VLCDBW-NEXT:    vpcmpeqd %ymm1, %ymm0, %k0
+; AVX512VLCDBW-NEXT:    vpbroadcastmb2q %k0, %ymm0
+; AVX512VLCDBW-NEXT:    ret{{[l|q]}}
 entry:
   %0 = icmp eq <8 x i32> %a, %b
   %1 = bitcast <8 x i1> %0 to i8


        

