[llvm] [InstCombine] Simplify demanded bits of blendv mask operands (PR #173723)
Dhruva Narayan K via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 28 05:45:42 PST 2025
https://github.com/Xylecrack updated https://github.com/llvm/llvm-project/pull/173723
>From 93fa3981e3a01496745fa65086abb1604cd4f2ba Mon Sep 17 00:00:00 2001
From: Dhruva Narayan <dhruvakodiadka at gmail.com>
Date: Sat, 27 Dec 2025 12:33:47 +0530
Subject: [PATCH 1/2] Add baseline tests for blendv mask simplification
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 23 +++++++++-
.../Transforms/InstCombine/X86/blend_x86.ll | 46 +++++++++++++++++++
2 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index c4d349044fe80..01c8ca806c6ba 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -2870,10 +2870,31 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
case Intrinsic::x86_avx_blendv_ps_256:
case Intrinsic::x86_avx_blendv_pd_256:
case Intrinsic::x86_avx2_pblendvb: {
+
+ Value *Mask = II.getArgOperand(2);
+
+ unsigned BitWidth = Mask->getType()->getScalarSizeInBits();
+ KnownBits Known(BitWidth);
+
+ if (Mask->getType()->isIntOrIntVectorTy()) {
+ if (IC.SimplifyDemandedBits(&II, 2, APInt::getSignMask(BitWidth), Known))
+ return &II;
+ }
+
+ else if (auto *BC = dyn_cast<BitCastInst>(Mask)) {
+ Value *Src = BC->getOperand(0);
+ if (Src->getType()->isIntOrIntVectorTy()) {
+ unsigned SrcBitWidth = Src->getType()->getScalarSizeInBits();
+ KnownBits KnownSrc(SrcBitWidth);
+ if (IC.SimplifyDemandedBits(BC, 0, APInt::getSignMask(SrcBitWidth),
+ KnownSrc))
+ return &II;
+ }
+ }
// fold (blend A, A, Mask) -> A
Value *Op0 = II.getArgOperand(0);
Value *Op1 = II.getArgOperand(1);
- Value *Mask = II.getArgOperand(2);
+ Mask = II.getArgOperand(2);
if (Op0 == Op1) {
return IC.replaceInstUsesWith(II, Op0);
}
diff --git a/llvm/test/Transforms/InstCombine/X86/blend_x86.ll b/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
index fb6bd7cdca83a..84109520d2f05 100644
--- a/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
+++ b/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
@@ -357,6 +357,52 @@ define <4 x double> @shl_blendvpd_v4f64(<4 x double> %a0, <4 x double> %a1, <4 x
ret <4 x double> %r
}
+define <16 x i8> @pblendvb_demanded_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m) {
+; CHECK-LABEL: @pblendvb_demanded_msb(
+; CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[M:%.*]], splat (i8 1)
+; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[OR]])
+; CHECK-NEXT: ret <16 x i8> [[R]]
+;
+ %or = or <16 x i8> %m, splat (i8 1)
+ %r = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %or)
+ ret <16 x i8> %r
+}
+
+define <8 x float> @blendvps_demanded_msb(<8 x float> %a, <8 x float> %b, <8 x i32> %m) {
+; CHECK-LABEL: @blendvps_demanded_msb(
+; CHECK-NEXT: [[OR:%.*]] = or <8 x i32> [[M:%.*]], splat (i32 1)
+; CHECK-NEXT: [[MASK:%.*]] = bitcast <8 x i32> [[OR]] to <8 x float>
+; CHECK-NEXT: [[R:%.*]] = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[MASK]])
+; CHECK-NEXT: ret <8 x float> [[R]]
+;
+ %or = or <8 x i32> %m, splat (i32 1)
+ %mask = bitcast <8 x i32> %or to <8 x float>
+ %r = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %mask)
+ ret <8 x float> %r
+}
+
+define <16 x i8> @pblendvb_or_affects_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m) {
+; CHECK-LABEL: @pblendvb_or_affects_msb(
+; CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[M:%.*]], splat (i8 -128)
+; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[OR]])
+; CHECK-NEXT: ret <16 x i8> [[R]]
+;
+ %or = or <16 x i8> %m, splat (i8 128)
+ %r = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %or)
+ ret <16 x i8> %r
+}
+
+define <32 x i8> @pblendvb_demanded_msb_avx2(<32 x i8> %a, <32 x i8> %b, <32 x i8> %m) {
+; CHECK-LABEL: @pblendvb_demanded_msb_avx2(
+; CHECK-NEXT: [[OR:%.*]] = or <32 x i8> [[M:%.*]], splat (i8 1)
+; CHECK-NEXT: [[R:%.*]] = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A:%.*]], <32 x i8> [[B:%.*]], <32 x i8> [[OR]])
+; CHECK-NEXT: ret <32 x i8> [[R]]
+;
+ %or = or <32 x i8> %m, splat (i8 1)
+ %r = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a, <32 x i8> %b, <32 x i8> %or)
+ ret <32 x i8> %r
+}
+
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
>From 3626c4d485053ee31d0376207129739d9072f767 Mon Sep 17 00:00:00 2001
From: Dhruva Narayan <dhruvakodiadka at gmail.com>
Date: Sat, 27 Dec 2025 20:16:13 +0530
Subject: [PATCH 2/2] Call SimplifyDemandedBits on mask operand
---
.../Target/X86/X86InstCombineIntrinsic.cpp | 40 +++++++++----------
.../Transforms/InstCombine/X86/blend_x86.ll | 26 +++++++-----
2 files changed, 35 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 01c8ca806c6ba..19188b74373c7 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -2870,31 +2870,10 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
case Intrinsic::x86_avx_blendv_ps_256:
case Intrinsic::x86_avx_blendv_pd_256:
case Intrinsic::x86_avx2_pblendvb: {
-
- Value *Mask = II.getArgOperand(2);
-
- unsigned BitWidth = Mask->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
-
- if (Mask->getType()->isIntOrIntVectorTy()) {
- if (IC.SimplifyDemandedBits(&II, 2, APInt::getSignMask(BitWidth), Known))
- return &II;
- }
-
- else if (auto *BC = dyn_cast<BitCastInst>(Mask)) {
- Value *Src = BC->getOperand(0);
- if (Src->getType()->isIntOrIntVectorTy()) {
- unsigned SrcBitWidth = Src->getType()->getScalarSizeInBits();
- KnownBits KnownSrc(SrcBitWidth);
- if (IC.SimplifyDemandedBits(BC, 0, APInt::getSignMask(SrcBitWidth),
- KnownSrc))
- return &II;
- }
- }
// fold (blend A, A, Mask) -> A
Value *Op0 = II.getArgOperand(0);
Value *Op1 = II.getArgOperand(1);
- Mask = II.getArgOperand(2);
+ Value *Mask = II.getArgOperand(2);
if (Op0 == Op1) {
return IC.replaceInstUsesWith(II, Op0);
}
@@ -2910,7 +2889,24 @@ X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
getNegativeIsTrueBoolVec(ConstantMask, IC.getDataLayout());
return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
}
+ unsigned BitWidth = Mask->getType()->getScalarSizeInBits();
+ if (Mask->getType()->isIntOrIntVectorTy()) {
+ KnownBits Known(BitWidth);
+ if (IC.SimplifyDemandedBits(&II, 2, APInt::getSignMask(BitWidth), Known))
+ return &II;
+ } else if (auto *BC = dyn_cast<BitCastInst>(Mask)) {
+ Value *Src = BC->getOperand(0);
+ if (Src->getType()->isIntOrIntVectorTy()) {
+ unsigned SrcBitWidth = Src->getType()->getScalarSizeInBits();
+ if (SrcBitWidth == BitWidth) {
+ KnownBits KnownSrc(SrcBitWidth);
+ if (IC.SimplifyDemandedBits(BC, 0, APInt::getSignMask(SrcBitWidth),
+ KnownSrc))
+ return &II;
+ }
+ }
+ }
Mask = InstCombiner::peekThroughBitcast(Mask);
// Peek through a one-use shuffle - VectorCombine should have simplified
diff --git a/llvm/test/Transforms/InstCombine/X86/blend_x86.ll b/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
index 84109520d2f05..3125dc417c031 100644
--- a/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
+++ b/llvm/test/Transforms/InstCombine/X86/blend_x86.ll
@@ -359,8 +359,7 @@ define <4 x double> @shl_blendvpd_v4f64(<4 x double> %a0, <4 x double> %a1, <4 x
define <16 x i8> @pblendvb_demanded_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m) {
; CHECK-LABEL: @pblendvb_demanded_msb(
-; CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[M:%.*]], splat (i8 1)
-; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[OR]])
+; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[OR:%.*]])
; CHECK-NEXT: ret <16 x i8> [[R]]
;
%or = or <16 x i8> %m, splat (i8 1)
@@ -370,8 +369,7 @@ define <16 x i8> @pblendvb_demanded_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m
define <8 x float> @blendvps_demanded_msb(<8 x float> %a, <8 x float> %b, <8 x i32> %m) {
; CHECK-LABEL: @blendvps_demanded_msb(
-; CHECK-NEXT: [[OR:%.*]] = or <8 x i32> [[M:%.*]], splat (i32 1)
-; CHECK-NEXT: [[MASK:%.*]] = bitcast <8 x i32> [[OR]] to <8 x float>
+; CHECK-NEXT: [[MASK:%.*]] = bitcast <8 x i32> [[OR:%.*]] to <8 x float>
; CHECK-NEXT: [[R:%.*]] = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[MASK]])
; CHECK-NEXT: ret <8 x float> [[R]]
;
@@ -383,9 +381,7 @@ define <8 x float> @blendvps_demanded_msb(<8 x float> %a, <8 x float> %b, <8 x i
define <16 x i8> @pblendvb_or_affects_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %m) {
; CHECK-LABEL: @pblendvb_or_affects_msb(
-; CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[M:%.*]], splat (i8 -128)
-; CHECK-NEXT: [[R:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[OR]])
-; CHECK-NEXT: ret <16 x i8> [[R]]
+; CHECK-NEXT: ret <16 x i8> [[R:%.*]]
;
%or = or <16 x i8> %m, splat (i8 128)
%r = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %or)
@@ -394,8 +390,7 @@ define <16 x i8> @pblendvb_or_affects_msb(<16 x i8> %a, <16 x i8> %b, <16 x i8>
define <32 x i8> @pblendvb_demanded_msb_avx2(<32 x i8> %a, <32 x i8> %b, <32 x i8> %m) {
; CHECK-LABEL: @pblendvb_demanded_msb_avx2(
-; CHECK-NEXT: [[OR:%.*]] = or <32 x i8> [[M:%.*]], splat (i8 1)
-; CHECK-NEXT: [[R:%.*]] = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A:%.*]], <32 x i8> [[B:%.*]], <32 x i8> [[OR]])
+; CHECK-NEXT: [[R:%.*]] = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A:%.*]], <32 x i8> [[B:%.*]], <32 x i8> [[OR:%.*]])
; CHECK-NEXT: ret <32 x i8> [[R]]
;
%or = or <32 x i8> %m, splat (i8 1)
@@ -403,6 +398,19 @@ define <32 x i8> @pblendvb_demanded_msb_avx2(<32 x i8> %a, <32 x i8> %b, <32 x i
ret <32 x i8> %r
}
+define <2 x double> @blendvpd_demanded_msb(<2 x double> %a, <2 x double> %b, <2 x i64> %m) {
+; CHECK-LABEL: @blendvpd_demanded_msb(
+; CHECK-NEXT: [[MASK:%.*]] = bitcast <2 x i64> [[M:%.*]] to <2 x double>
+; CHECK-NEXT: [[R:%.*]] = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[MASK]])
+; CHECK-NEXT: ret <2 x double> [[R]]
+;
+ %or = or <2 x i64> %m, splat (i64 1)
+ %mask = bitcast <2 x i64> %or to <2 x double>
+ %r = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a, <2 x double> %b, <2 x double> %mask)
+ ret <2 x double> %r
+}
+
+
declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
More information about the llvm-commits
mailing list