[llvm] f84ad45 - [LLVM][InstCombine] not (bitcast (cmp A, B)) --> bitcast (!cmp A, B) (#167693)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 13 03:47:16 PST 2025
Author: Paul Walker
Date: 2025-11-13T11:47:12Z
New Revision: f84ad4504dfba9df049296d451ec8da668e847a4
URL: https://github.com/llvm/llvm-project/commit/f84ad4504dfba9df049296d451ec8da668e847a4
DIFF: https://github.com/llvm/llvm-project/commit/f84ad4504dfba9df049296d451ec8da668e847a4.diff
LOG: [LLVM][InstCombine] not (bitcast (cmp A, B)) --> bitcast (!cmp A, B) (#167693)
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
llvm/test/Transforms/InstCombine/not.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index cbaff294819a2..ba5568b00441b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -5096,9 +5096,17 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
return &I;
}
+ // not (bitcast (cmp A, B) --> bitcast (!cmp A, B)
+ if (match(NotOp, m_OneUse(m_BitCast(m_Value(X)))) &&
+ match(X, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
+ cast<CmpInst>(X)->setPredicate(CmpInst::getInversePredicate(Pred));
+ return new BitCastInst(X, Ty);
+ }
+
// Move a 'not' ahead of casts of a bool to enable logic reduction:
// not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
- if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) && X->getType()->isIntOrIntVectorTy(1)) {
+ if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
+ X->getType()->isIntOrIntVectorTy(1)) {
Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
Value *NotX = Builder.CreateNot(X);
Value *Sext = Builder.CreateSExt(NotX, SextTy);
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index d693b9d8f8557..1acf55a50208d 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -1061,3 +1061,81 @@ if.else:
call void @f2()
unreachable
}
+
+define i8 @invert_bitcasted_icmp(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: ret i8 [[MASK_AS_INT]]
+;
+ %cmp = icmp sle <8 x i32> %a, %b
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ %not = xor i8 %mask.as.int, 255
+ ret i8 %not
+}
+
+define i8 @invert_bitcasted_icmp_samesign(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp_samesign(
+; CHECK-NEXT: [[CMP:%.*]] = icmp samesign sgt <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: ret i8 [[MASK_AS_INT]]
+;
+ %cmp = icmp samesign sle <8 x i32> %a, %b
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ %not = xor i8 %mask.as.int, 255
+ ret i8 %not
+}
+
+define i8 @invert_bitcasted_icmp_multi_use_1(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp_multi_use_1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: call void (...) @llvm.fake.use(<8 x i1> [[CMP]])
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MASK_AS_INT]], -1
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %cmp = icmp sle <8 x i32> %a, %b
+ call void (...) @llvm.fake.use(<8 x i1> %cmp)
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ %not = xor i8 %mask.as.int, -1
+ ret i8 %not
+}
+
+define i8 @invert_bitcasted_icmp_multi_use_2(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: @invert_bitcasted_icmp_multi_use_2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: call void (...) @llvm.fake.use(i8 [[MASK_AS_INT]])
+; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MASK_AS_INT]], -1
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %cmp = icmp sle <8 x i32> %a, %b
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ call void (...) @llvm.fake.use(i8 %mask.as.int)
+ %not = xor i8 %mask.as.int, -1
+ ret i8 %not
+}
+
+define i8 @invert_bitcasted_fcmp(<8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: @invert_bitcasted_fcmp(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp uge <8 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: ret i8 [[MASK_AS_INT]]
+;
+ %cmp = fcmp olt <8 x float> %a, %b
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ %not = xor i8 %mask.as.int, 255
+ ret i8 %not
+}
+
+define i8 @invert_bitcasted_fcmp_fast(<8 x float> %a, <8 x float> %b) {
+; CHECK-LABEL: @invert_bitcasted_fcmp_fast(
+; CHECK-NEXT: [[CMP:%.*]] = fcmp fast uge <8 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[MASK_AS_INT:%.*]] = bitcast <8 x i1> [[CMP]] to i8
+; CHECK-NEXT: ret i8 [[MASK_AS_INT]]
+;
+ %cmp = fcmp fast olt <8 x float> %a, %b
+ %mask.as.int = bitcast <8 x i1> %cmp to i8
+ %not = xor i8 %mask.as.int, 255
+ ret i8 %not
+}
More information about the llvm-commits
mailing list