[llvm] r271990 - [InstCombine][SSE] Add MOVMSK constant folding (PR27982)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 7 01:18:35 PDT 2016


Author: rksimon
Date: Tue Jun  7 03:18:35 2016
New Revision: 271990

URL: http://llvm.org/viewvc/llvm-project?rev=271990&view=rev
Log:
[InstCombine][SSE] Add MOVMSK constant folding (PR27982)

This patch adds support for folding undef/zero/constant inputs to MOVMSK instructions.

The SSE/AVX versions can be fully folded, but the MMX version can only handle undef inputs.

Differential Revision: http://reviews.llvm.org/D20998

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/trunk/test/Transforms/InstCombine/x86-movmsk.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=271990&r1=271989&r2=271990&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Tue Jun  7 03:18:35 2016
@@ -325,6 +325,45 @@ static Value *simplifyX86immShift(const
   return Builder.CreateAShr(Vec, ShiftVec);
 }
 
+// Attempt to constant fold a MOVMSK-style intrinsic (SSE/SSE2/AVX/AVX2
+// movmsk/pmovmskb and MMX pmovmskb): pack the sign bit of each vector
+// element into the low bits of the scalar integer result.
+// Returns the folded constant, or nullptr if the input cannot be folded.
+static Value *simplifyX86movmsk(const IntrinsicInst &II,
+                                InstCombiner::BuilderTy &Builder) {
+  Value *Arg = II.getArgOperand(0);
+  Type *ResTy = II.getType();
+  Type *ArgTy = Arg->getType();
+
+  // movmsk(undef) -> zero as we must ensure the upper bits are zero.
+  if (isa<UndefValue>(Arg))
+    return Constant::getNullValue(ResTy);
+
+  // We can't easily peek through x86_mmx types.
+  // (The MMX variant takes an x86_mmx argument, not a vector, so only the
+  // whole-value undef case above is folded for it.)
+  if (!ArgTy->isVectorTy())
+    return nullptr;
+
+  auto *C = dyn_cast<Constant>(Arg);
+  if (!C)
+    return nullptr;
+
+  // Extract signbits of the vector input and pack into integer result.
+  APInt Result(ResTy->getPrimitiveSizeInBits(), 0);
+  for (unsigned I = 0, E = ArgTy->getVectorNumElements(); I != E; ++I) {
+    auto *COp = C->getAggregateElement(I);
+    if (!COp)
+      return nullptr;
+    // An undef element contributes a zero bit - any value would be legal
+    // here; zero matches the whole-vector undef handling above.
+    if (isa<UndefValue>(COp))
+      continue;
+
+    // Only plain scalar constants are handled; anything else (e.g. a
+    // ConstantExpr element) aborts the fold.
+    auto *CInt = dyn_cast<ConstantInt>(COp);
+    auto *CFp = dyn_cast<ConstantFP>(COp);
+    if (!CInt && !CFp)
+      return nullptr;
+
+    // isNegative() tests the raw sign bit, so -0.0 sets its result bit,
+    // matching the hardware behavior of copying the element's sign bit.
+    if ((CInt && CInt->isNegative()) || (CFp && CFp->isNegative()))
+      Result.setBit(I);
+  }
+
+  return Constant::getIntegerValue(ResTy, Result);
+}
+
 static Value *simplifyX86insertps(const IntrinsicInst &II,
                                   InstCombiner::BuilderTy &Builder) {
   auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2));
@@ -1460,6 +1499,18 @@ Instruction *InstCombiner::visitCallInst
     break;
   }
 
+  case Intrinsic::x86_mmx_pmovmskb:
+  case Intrinsic::x86_sse_movmsk_ps:
+  case Intrinsic::x86_sse2_movmsk_pd:
+  case Intrinsic::x86_sse2_pmovmskb_128:
+  case Intrinsic::x86_avx_movmsk_pd_256:
+  case Intrinsic::x86_avx_movmsk_ps_256:
+  case Intrinsic::x86_avx2_pmovmskb: {
+    if (Value *V = simplifyX86movmsk(*II, *Builder))
+      return replaceInstUsesWith(*II, V);
+    break;
+  }
+
   case Intrinsic::x86_sse_comieq_ss:
   case Intrinsic::x86_sse_comige_ss:
   case Intrinsic::x86_sse_comigt_ss:

Modified: llvm/trunk/test/Transforms/InstCombine/x86-movmsk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/x86-movmsk.ll?rev=271990&r1=271989&r2=271990&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/x86-movmsk.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/x86-movmsk.ll Tue Jun  7 03:18:35 2016
@@ -129,6 +129,190 @@ define i32 @test_lower_x86_avx_movmsk_pd
 
 ; llvm.x86.avx2.pmovmskb uses the whole of the 32-bit register.
 
+;
+; Constant Folding (UNDEF -> ZERO)
+;
+
+; Every variant - including the MMX one - folds movmsk(undef) to 0: the
+; result's bits above the mask must be zero, so 0 is the safe choice.
+
+define i32 @undef_x86_mmx_pmovmskb() {
+; CHECK-LABEL: @undef_x86_mmx_pmovmskb(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_sse_movmsk_ps() {
+; CHECK-LABEL: @undef_x86_sse_movmsk_ps(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_sse2_movmsk_pd() {
+; CHECK-LABEL: @undef_x86_sse2_movmsk_pd(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_sse2_pmovmskb_128() {
+; CHECK-LABEL: @undef_x86_sse2_pmovmskb_128(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_avx_movmsk_ps_256() {
+; CHECK-LABEL: @undef_x86_avx_movmsk_ps_256(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_avx_movmsk_pd_256() {
+; CHECK-LABEL: @undef_x86_avx_movmsk_pd_256(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> undef)
+  ret i32 %1
+}
+
+define i32 @undef_x86_avx2_pmovmskb() {
+; CHECK-LABEL: @undef_x86_avx2_pmovmskb(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> undef)
+  ret i32 %1
+}
+
+;
+; Constant Folding (ZERO -> ZERO)
+;
+
+; A zero vector has no sign bits set, so all SSE/AVX variants fold to 0.
+; The MMX variant takes an x86_mmx argument and only the undef case is
+; handled for it, so here the call is expected to remain unfolded.
+
+define i32 @zero_x86_mmx_pmovmskb() {
+; CHECK-LABEL: @zero_x86_mmx_pmovmskb(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx bitcast (<1 x i64> zeroinitializer to x86_mmx))
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %1 = bitcast <1 x i64> zeroinitializer to x86_mmx
+  %2 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %1)
+  ret i32 %2
+}
+
+define i32 @zero_x86_sse_movmsk_ps() {
+; CHECK-LABEL: @zero_x86_sse_movmsk_ps(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> zeroinitializer)
+  ret i32 %1
+}
+
+define i32 @zero_x86_sse2_movmsk_pd() {
+; CHECK-LABEL: @zero_x86_sse2_movmsk_pd(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> zeroinitializer)
+  ret i32 %1
+}
+
+define i32 @zero_x86_sse2_pmovmskb_128() {
+; CHECK-LABEL: @zero_x86_sse2_pmovmskb_128(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> zeroinitializer)
+  ret i32 %1
+}
+
+define i32 @zero_x86_avx_movmsk_ps_256() {
+; CHECK-LABEL: @zero_x86_avx_movmsk_ps_256(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> zeroinitializer)
+  ret i32 %1
+}
+
+define i32 @zero_x86_avx_movmsk_pd_256() {
+; CHECK-LABEL: @zero_x86_avx_movmsk_pd_256(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> zeroinitializer)
+  ret i32 %1
+}
+
+define i32 @zero_x86_avx2_pmovmskb() {
+; CHECK-LABEL: @zero_x86_avx2_pmovmskb(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> zeroinitializer)
+  ret i32 %1
+}
+
+;
+; Constant Folding
+;
+
+; Only the bitcast operand is canonicalized here (255 -> -1, 256 wraps to
+; 0); the MMX call itself is not folded as its argument is x86_mmx.
+define i32 @fold_x86_mmx_pmovmskb() {
+; CHECK-LABEL: @fold_x86_mmx_pmovmskb(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx bitcast (<8 x i8> <i8 0, i8 -1, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 0> to x86_mmx))
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %1 = bitcast <8 x i8> <i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256> to x86_mmx
+  %2 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %1)
+  ret i32 %2
+}
+
+; Negative elements 1 and 3 -> 0b1010 = 10.
+define i32 @fold_x86_sse_movmsk_ps() {
+; CHECK-LABEL: @fold_x86_sse_movmsk_ps(
+; CHECK-NEXT:    ret i32 10
+;
+  %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> <float 1.0, float -1.0, float 100.0, float -200.0>)
+  ret i32 %1
+}
+
+; Negative element 1 -> 0b10 = 2.
+define i32 @fold_x86_sse2_movmsk_pd() {
+; CHECK-LABEL: @fold_x86_sse2_movmsk_pd(
+; CHECK-NEXT:    ret i32 2
+;
+  %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> <double 1.0, double -1.0>)
+  ret i32 %1
+}
+
+; Negative elements 1,2,4 and 9,10,12 -> 22 + 5632 = 5654.
+define i32 @fold_x86_sse2_pmovmskb_128() {
+; CHECK-LABEL: @fold_x86_sse2_pmovmskb_128(
+; CHECK-NEXT:    ret i32 5654
+;
+  %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> <i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256, i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256>)
+  ret i32 %1
+}
+
+; Negative elements 1,3,5,7 -> 170; note -0.0 (element 5) counts as
+; negative since the fold tests the raw sign bit.
+define i32 @fold_x86_avx_movmsk_ps_256() {
+; CHECK-LABEL: @fold_x86_avx_movmsk_ps_256(
+; CHECK-NEXT:    ret i32 170
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> <float 1.0, float -1.0, float 100.0, float -200.0, float +0.0, float -0.0, float 100000.0, float -5000000.0>)
+  ret i32 %1
+}
+
+; Negative elements 1 and 3 -> 10.
+define i32 @fold_x86_avx_movmsk_pd_256() {
+; CHECK-LABEL: @fold_x86_avx_movmsk_pd_256(
+; CHECK-NEXT:    ret i32 10
+;
+  %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> <double 1.0, double -1.0, double 100.0, double -200.0>)
+  ret i32 %1
+}
+
+; Undef elements 1-7 contribute zero bits; negative elements 9,10,12,
+; 17,18,20 and 25,26,28 -> 370546176.
+define i32 @fold_x86_avx2_pmovmskb() {
+; CHECK-LABEL: @fold_x86_avx2_pmovmskb(
+; CHECK-NEXT:    ret i32 370546176
+;
+  %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256, i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256, i8 0, i8 255, i8 -1, i8 127, i8 -127, i8 63, i8 64, i8 256>)
+  ret i32 %1
+}
+
 declare i32 @llvm.x86.mmx.pmovmskb(x86_mmx)
 
 declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)




More information about the llvm-commits mailing list