[llvm] 77b3f89 - [X86] combineAndMaskToShift - match constant splat with X86::isConstantSplat

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 11 03:26:02 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-11T11:25:34+01:00
New Revision: 77b3f890ccad711184d2a2a9ddcef351051d4dd5

URL: https://github.com/llvm/llvm-project/commit/77b3f890ccad711184d2a2a9ddcef351051d4dd5
DIFF: https://github.com/llvm/llvm-project/commit/77b3f890ccad711184d2a2a9ddcef351051d4dd5.diff

LOG: [X86] combineAndMaskToShift - match constant splat with X86::isConstantSplat

Using X86::isConstantSplat instead of ISD::isConstantSplatVector allows us to detect constant masks after they've been lowered to constant pool loads.

Addresses regression from D154592

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/cast-vsel.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 475dac3f4b2dc3..2d6f660b17e9ab 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -50317,8 +50317,7 @@ static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
   }
 
   APInt SplatVal;
-  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
-      !SplatVal.isMask())
+  if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
     return SDValue();
 
   // Don't prevent creation of ANDN.

diff --git a/llvm/test/CodeGen/X86/cast-vsel.ll b/llvm/test/CodeGen/X86/cast-vsel.ll
index 019eae7c428c6c..3067ea674c00d9 100644
--- a/llvm/test/CodeGen/X86/cast-vsel.ll
+++ b/llvm/test/CodeGen/X86/cast-vsel.ll
@@ -278,24 +278,23 @@ define dso_local void @example25() nounwind {
 ; SSE2-LABEL: example25:
 ; SSE2:       # %bb.0: # %vector.ph
 ; SSE2-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE2-NEXT:    movaps {{.*#+}} xmm0 = [1,1,1,1]
 ; SSE2-NEXT:    .p2align 4, 0x90
 ; SSE2-NEXT:  .LBB5_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT:    movaps da+4112(%rax), %xmm1
-; SSE2-NEXT:    movaps da+4096(%rax), %xmm2
-; SSE2-NEXT:    cmpltps db+4096(%rax), %xmm2
-; SSE2-NEXT:    cmpltps db+4112(%rax), %xmm1
-; SSE2-NEXT:    movaps dc+4112(%rax), %xmm3
-; SSE2-NEXT:    movaps dc+4096(%rax), %xmm4
-; SSE2-NEXT:    cmpltps dd+4096(%rax), %xmm4
-; SSE2-NEXT:    andps %xmm2, %xmm4
-; SSE2-NEXT:    cmpltps dd+4112(%rax), %xmm3
+; SSE2-NEXT:    movaps da+4112(%rax), %xmm0
+; SSE2-NEXT:    movaps da+4096(%rax), %xmm1
+; SSE2-NEXT:    cmpltps db+4096(%rax), %xmm1
+; SSE2-NEXT:    cmpltps db+4112(%rax), %xmm0
+; SSE2-NEXT:    movaps dc+4112(%rax), %xmm2
+; SSE2-NEXT:    movaps dc+4096(%rax), %xmm3
+; SSE2-NEXT:    cmpltps dd+4096(%rax), %xmm3
 ; SSE2-NEXT:    andps %xmm1, %xmm3
-; SSE2-NEXT:    andps %xmm0, %xmm4
-; SSE2-NEXT:    andps %xmm0, %xmm3
-; SSE2-NEXT:    movaps %xmm3, dj+4112(%rax)
-; SSE2-NEXT:    movaps %xmm4, dj+4096(%rax)
+; SSE2-NEXT:    cmpltps dd+4112(%rax), %xmm2
+; SSE2-NEXT:    andps %xmm0, %xmm2
+; SSE2-NEXT:    psrld $31, %xmm3
+; SSE2-NEXT:    psrld $31, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, dj+4112(%rax)
+; SSE2-NEXT:    movdqa %xmm3, dj+4096(%rax)
 ; SSE2-NEXT:    addq $32, %rax
 ; SSE2-NEXT:    jne .LBB5_1
 ; SSE2-NEXT:  # %bb.2: # %for.end
@@ -304,24 +303,23 @@ define dso_local void @example25() nounwind {
 ; SSE41-LABEL: example25:
 ; SSE41:       # %bb.0: # %vector.ph
 ; SSE41-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE41-NEXT:    movaps {{.*#+}} xmm0 = [1,1,1,1]
 ; SSE41-NEXT:    .p2align 4, 0x90
 ; SSE41-NEXT:  .LBB5_1: # %vector.body
 ; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE41-NEXT:    movaps da+4112(%rax), %xmm1
-; SSE41-NEXT:    movaps da+4096(%rax), %xmm2
-; SSE41-NEXT:    cmpltps db+4096(%rax), %xmm2
-; SSE41-NEXT:    cmpltps db+4112(%rax), %xmm1
-; SSE41-NEXT:    movaps dc+4112(%rax), %xmm3
-; SSE41-NEXT:    movaps dc+4096(%rax), %xmm4
-; SSE41-NEXT:    cmpltps dd+4096(%rax), %xmm4
-; SSE41-NEXT:    andps %xmm2, %xmm4
-; SSE41-NEXT:    cmpltps dd+4112(%rax), %xmm3
+; SSE41-NEXT:    movaps da+4112(%rax), %xmm0
+; SSE41-NEXT:    movaps da+4096(%rax), %xmm1
+; SSE41-NEXT:    cmpltps db+4096(%rax), %xmm1
+; SSE41-NEXT:    cmpltps db+4112(%rax), %xmm0
+; SSE41-NEXT:    movaps dc+4112(%rax), %xmm2
+; SSE41-NEXT:    movaps dc+4096(%rax), %xmm3
+; SSE41-NEXT:    cmpltps dd+4096(%rax), %xmm3
 ; SSE41-NEXT:    andps %xmm1, %xmm3
-; SSE41-NEXT:    andps %xmm0, %xmm3
-; SSE41-NEXT:    psrld $31, %xmm4
-; SSE41-NEXT:    movaps %xmm3, dj+4112(%rax)
-; SSE41-NEXT:    movdqa %xmm4, dj+4096(%rax)
+; SSE41-NEXT:    cmpltps dd+4112(%rax), %xmm2
+; SSE41-NEXT:    andps %xmm0, %xmm2
+; SSE41-NEXT:    psrld $31, %xmm3
+; SSE41-NEXT:    psrld $31, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, dj+4112(%rax)
+; SSE41-NEXT:    movdqa %xmm3, dj+4096(%rax)
 ; SSE41-NEXT:    addq $32, %rax
 ; SSE41-NEXT:    jne .LBB5_1
 ; SSE41-NEXT:  # %bb.2: # %for.end


        


More information about the llvm-commits mailing list