[llvm] 2434c8f - [DAG] canCreateUndefOrPoison - add ISD::INSERT_VECTOR_ELT handling

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 2 08:28:38 PDT 2023


Author: Simon Pilgrim
Date: 2023-04-02T16:28:26+01:00
New Revision: 2434c8fcf92ce3de8deb9ee32519ffc13bcdc8e7

URL: https://github.com/llvm/llvm-project/commit/2434c8fcf92ce3de8deb9ee32519ffc13bcdc8e7
DIFF: https://github.com/llvm/llvm-project/commit/2434c8fcf92ce3de8deb9ee32519ffc13bcdc8e7.diff

LOG: [DAG] canCreateUndefOrPoison - add ISD::INSERT_VECTOR_ELT handling

If the inserted element index is guaranteed to be in bounds then an ISD::INSERT_VECTOR_ELT will not create poison/undef.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/X86/freeze-vector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 95cc1fa94fbb5..faffe53cfce49 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4820,6 +4820,13 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
     return ConsiderFlags && (Op->getFlags().hasNoSignedWrap() ||
                              Op->getFlags().hasNoUnsignedWrap());
 
+  case ISD::INSERT_VECTOR_ELT:{
+    // Ensure that the element index is in bounds.
+    EVT VecVT = Op.getOperand(0).getValueType();
+    KnownBits KnownIdx = computeKnownBits(Op.getOperand(2), Depth + 1);
+    return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
+  }
+
   default:
     // Allow the target to implement this method for its nodes.
     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||

diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll
index 4e562e103ae44..4139d974d8308 100644
--- a/llvm/test/CodeGen/X86/freeze-vector.ll
+++ b/llvm/test/CodeGen/X86/freeze-vector.ll
@@ -3,19 +3,10 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
 
 define <2 x i64> @freeze_insert_vector_elt(<2 x i64> %a0) {
-; X86-LABEL: freeze_insert_vector_elt:
-; X86:       # %bb.0:
-; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X86-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; X86-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X86-NEXT:    retl
-;
-; X64-LABEL: freeze_insert_vector_elt:
-; X64:       # %bb.0:
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; X64-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; X64-NEXT:    retq
+; CHECK-LABEL: freeze_insert_vector_elt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %idx0 = insertelement <2 x i64> %a0, i64 0, i64 0
   %freeze0 = freeze <2 x i64> %idx0
   %idx1 = insertelement <2 x i64> %freeze0, i64 0, i64 1
@@ -358,17 +349,16 @@ define void @freeze_two_frozen_buildvectors(ptr %origin0, ptr %origin1, ptr %dst
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl (%edx), %edx
 ; X86-NEXT:    andl $15, %edx
-; X86-NEXT:    vmovd %edx, %xmm0
-; X86-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
-; X86-NEXT:    vmovd %eax, %xmm2
-; X86-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; X86-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
-; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; X86-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
-; X86-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; X86-NEXT:    vmovdqa %xmm1, (%ecx)
-; X86-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
-; X86-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; X86-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; X86-NEXT:    vmovdqa {{.*#+}} xmm1 = [7,7,7,7]
+; X86-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vmovdqa %xmm0, (%ecx)
+; X86-NEXT:    vmovd %eax, %xmm0
+; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-NEXT:    vmovd %edx, %xmm2
+; X86-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
+; X86-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
+; X86-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; X86-NEXT:    vmovdqa %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
@@ -376,16 +366,16 @@ define void @freeze_two_frozen_buildvectors(ptr %origin0, ptr %origin1, ptr %dst
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    andl $15, %eax
+; X64-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
+; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vmovdqa %xmm0, (%rdx)
 ; X64-NEXT:    vmovd %eax, %xmm0
 ; X64-NEXT:    vpbroadcastd %xmm0, %xmm0
-; X64-NEXT:    vmovd %eax, %xmm1
-; X64-NEXT:    vpbroadcastd %xmm1, %xmm1
-; X64-NEXT:    vpblendd {{.*#+}} xmm2 = xmm1[0],xmm0[1],xmm1[2,3]
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [7,7,7,7]
-; X64-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; X64-NEXT:    vmovdqa %xmm2, (%rdx)
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
-; X64-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; X64-NEXT:    vmovd %eax, %xmm2
+; X64-NEXT:    vpbroadcastd %xmm2, %xmm2
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3]
+; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rcx)
 ; X64-NEXT:    retq
   %i0.src = load i32, ptr %origin0


        


More information about the llvm-commits mailing list