[llvm] 1eec357 - [VP] IR expansion for maxnum/minnum

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Sep 11 19:23:41 PDT 2023


Author: liqin.weng
Date: 2023-09-12T10:15:52+08:00
New Revision: 1eec3574943f185c6a21b9ee9ee3213b503f9a74

URL: https://github.com/llvm/llvm-project/commit/1eec3574943f185c6a21b9ee9ee3213b503f9a74
DIFF: https://github.com/llvm/llvm-project/commit/1eec3574943f185c6a21b9ee9ee3213b503f9a74.diff

LOG: [VP] IR expansion for maxnum/minnum

Add basic handling for VP ops that can be expanded to non-predicated ops

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D159494
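
As a minimal sketch of what this change does (not part of the original log message): the expansion rewrites a predicated llvm.vp.maxnum/llvm.vp.minnum call into a plain llvm.maxnum/llvm.minnum call, dropping the mask and EVL operands, which is legal because lanes disabled by the mask or EVL of a VP intrinsic may yield poison.

    ; before ExpandVectorPredication (operands as in the added test)
    %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)

    ; after expandPredicationToFPCall (illustrative output, assuming the target
    ; reports the VP op as unsupported so the pass expands it)
    %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %va, <4 x float> %vb)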

Added: 
    

Modified: 
    llvm/lib/CodeGen/ExpandVectorPredication.cpp
    llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index bf2291ba8897744..50182ce0bbf278f 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -318,6 +318,16 @@ Value *CachingVPExpander::expandPredicationToFPCall(
     replaceOperation(*NewOp, VPI);
     return NewOp;
   }
+  case Intrinsic::maxnum:
+  case Intrinsic::minnum: {
+    Value *Op0 = VPI.getOperand(0);
+    Value *Op1 = VPI.getOperand(1);
+    Function *Fn = Intrinsic::getDeclaration(
+        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
+    Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
+    replaceOperation(*NewOp, VPI);
+    return NewOp;
+  }
   case Intrinsic::experimental_constrained_fma:
   case Intrinsic::experimental_constrained_fmuladd: {
     Value *Op0 = VPI.getOperand(0);
@@ -708,6 +718,10 @@ Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
     return expandPredicationToFPCall(Builder, VPI, Intrinsic::fabs);
   case Intrinsic::vp_sqrt:
     return expandPredicationToFPCall(Builder, VPI, Intrinsic::sqrt);
+  case Intrinsic::vp_maxnum:
+    return expandPredicationToFPCall(Builder, VPI, Intrinsic::maxnum);
+  case Intrinsic::vp_minnum:
+    return expandPredicationToFPCall(Builder, VPI, Intrinsic::minnum);
   case Intrinsic::vp_load:
   case Intrinsic::vp_store:
   case Intrinsic::vp_gather:

diff --git a/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
index b2c336576fc8bb4..55750be8bdca393 100644
--- a/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
@@ -404,3 +404,162 @@ define void @vp_fmuladd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i4 %a5
 }
 declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32)
 
+declare <4 x float> @llvm.vp.maxnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; SSE-LABEL: vfmax_vv_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    maxps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm2, %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vfmax_vv_v4f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vfmax_vv_v4f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vfmax_vv_v4f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmaxps %xmm0, %xmm1, %xmm2
+; AVX512-NEXT:    vcmpunordps %xmm0, %xmm0, %k1
+; AVX512-NEXT:    vmovaps %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vmovaps %xmm2, %xmm0
+; AVX512-NEXT:    retq
+  %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <8 x float> @llvm.vp.maxnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
+define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; SSE-LABEL: vfmax_vv_v8f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    maxps %xmm0, %xmm4
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm2
+; SSE-NEXT:    andnps %xmm4, %xmm0
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    movaps %xmm3, %xmm2
+; SSE-NEXT:    maxps %xmm1, %xmm2
+; SSE-NEXT:    cmpunordps %xmm1, %xmm1
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    andnps %xmm2, %xmm1
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vfmax_vv_v8f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmaxps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vcmpunordps %ymm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vfmax_vv_v8f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmaxps %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vcmpunordps %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vfmax_vv_v8f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmaxps %ymm0, %ymm1, %ymm2
+; AVX512-NEXT:    vcmpunordps %ymm0, %ymm0, %k1
+; AVX512-NEXT:    vmovaps %ymm1, %ymm2 {%k1}
+; AVX512-NEXT:    vmovaps %ymm2, %ymm0
+; AVX512-NEXT:    retq
+  %v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}
+
+declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; SSE-LABEL: vfmin_vv_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    minps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm2, %xmm0
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vfmin_vv_v4f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vminps %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vfmin_vv_v4f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vminps %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vfmin_vv_v4f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vminps %xmm0, %xmm1, %xmm2
+; AVX512-NEXT:    vcmpunordps %xmm0, %xmm0, %k1
+; AVX512-NEXT:    vmovaps %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vmovaps %xmm2, %xmm0
+; AVX512-NEXT:    retq
+  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %v
+}
+
+declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
+define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; SSE-LABEL: vfmin_vv_v8f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    minps %xmm0, %xmm4
+; SSE-NEXT:    cmpunordps %xmm0, %xmm0
+; SSE-NEXT:    andps %xmm0, %xmm2
+; SSE-NEXT:    andnps %xmm4, %xmm0
+; SSE-NEXT:    orps %xmm2, %xmm0
+; SSE-NEXT:    movaps %xmm3, %xmm2
+; SSE-NEXT:    minps %xmm1, %xmm2
+; SSE-NEXT:    cmpunordps %xmm1, %xmm1
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    andnps %xmm2, %xmm1
+; SSE-NEXT:    orps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vfmin_vv_v8f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vminps %ymm0, %ymm1, %ymm2
+; AVX1-NEXT:    vcmpunordps %ymm0, %ymm0, %ymm0
+; AVX1-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vfmin_vv_v8f32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vminps %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vcmpunordps %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vblendvps %ymm0, %ymm1, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vfmin_vv_v8f32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vminps %ymm0, %ymm1, %ymm2
+; AVX512-NEXT:    vcmpunordps %ymm0, %ymm0, %k1
+; AVX512-NEXT:    vmovaps %ymm1, %ymm2 {%k1}
+; AVX512-NEXT:    vmovaps %ymm2, %ymm0
+; AVX512-NEXT:    retq
+  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  ret <8 x float> %v
+}


        

