[llvm] c8fb277 - [ValueTracking] Add tests for known bits after common BMI pattern (blsmsk/blsi); NFC

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 18 11:31:32 PST 2023


Author: Noah Goldstein
Date: 2023-02-18T13:31:12-06:00
New Revision: c8fb2775cee0e80b07ba99d8f4986c170ff6e2be

URL: https://github.com/llvm/llvm-project/commit/c8fb2775cee0e80b07ba99d8f4986c170ff6e2be
DIFF: https://github.com/llvm/llvm-project/commit/c8fb2775cee0e80b07ba99d8f4986c170ff6e2be.diff

LOG: [ValueTracking] Add tests for known bits after common BMI pattern (blsmsk/blsi); NFC

Differential Revision: https://reviews.llvm.org/D142270
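For context, the pattern under test is the scalar form of the BMI idioms: blsmsk is x ^ (x - 1) (a mask of the lowest set bit and every bit below it) and blsi is x & -x (the lowest set bit in isolation). Once some low bit of x is known to be set, known-bits analysis can bound both results and fold comparisons against higher bits. A minimal illustrative sketch of that reasoning, not part of the committed test file (function names here are made up):

  define i1 @blsmsk_sketch(i32 %x) {
    %x1 = or i32 %x, 2      ; lowest set bit of %x1 is bit 0 or bit 1
    %x2 = sub i32 %x1, 1    ; clears that bit and sets every bit below it
    %x3 = xor i32 %x1, %x2  ; mask of the lowest set bit and all bits below
    %z = icmp eq i32 %x3, 8 ; %x3 is 1 or 3, so this can fold to false
    ret i1 %z
  }

  define i1 @blsi_sketch(i32 %x) {
    %x1 = or i32 %x, 2      ; lowest set bit of %x1 is bit 0 or bit 1
    %x2 = sub i32 0, %x1    ; -%x1
    %x3 = and i32 %x1, %x2  ; isolates the lowest set bit of %x1
    %z = icmp eq i32 %x3, 8 ; %x3 is 1 or 2, so this can fold to false
    ret i1 %z
  }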

Added: 
    llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
new file mode 100644
index 0000000000000..aa192c38ed747
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll
@@ -0,0 +1,886 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+declare void @llvm.assume(i1)
+
+define i1 @blsmsk_eq_is_false(i32 %x) {
+; CHECK-LABEL: @blsmsk_eq_is_false(
+; CHECK-NEXT:    ret i1 false
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = icmp eq i32 %x3, 8
+  ret i1 %z
+}
+
+define <2 x i1> @blsmsk_ne_is_true_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_ne_is_true_vec(
+; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 10>
+  %x2 = sub <2 x i32> %x1, <i32 1, i32 1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = icmp ne <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define <2 x i1> @blsmsk_ne_is_true_diff_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_ne_is_true_diff_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 130>
+; CHECK-NEXT:    [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ne <2 x i32> [[X3]], <i32 8, i32 8>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 130>
+  %x2 = sub <2 x i32> %x1, <i32 1, i32 1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = icmp ne <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define i1 @blsmsk_ge_is_false(i32 %x) {
+; CHECK-LABEL: @blsmsk_ge_is_false(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 10
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i32 [[X3]], 7
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = icmp uge i32 %x3, 8
+  ret i1 %z
+}
+
+define <2 x i1> @blsmsk_gt_is_false_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_gt_is_false_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 10>
+; CHECK-NEXT:    [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], <i32 8, i32 8>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 10>
+  %x2 = sub <2 x i32> %x1, <i32 1, i32 1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = icmp ugt <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define i1 @blsmsk_signed_is_false(i32 %x) {
+; CHECK-LABEL: @blsmsk_signed_is_false(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 10
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp slt i32 [[X3]], 0
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = icmp slt i32 %x3, 0
+  ret i1 %z
+}
+
+define i32 @blsmsk_add_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_add_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 9
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 9
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x2, %x1
+  %z = add i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_add_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_add_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 9, i32 9>
+; CHECK-NEXT:    [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = add <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 9, i32 9>
+  %x2 = add <2 x i32> %x1, <i32 -1, i32 -1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = add <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsmsk_sub_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_sub_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 9
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], -32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 9
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = sub i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_or_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_or_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 129
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 129
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x2, %x1
+  %z = or i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_or_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_or_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 9, i32 9>
+; CHECK-NEXT:    [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 9, i32 9>
+  %x2 = add <2 x i32> %x1, <i32 -1, i32 -1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = or <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsmsk_xor_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_xor_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 255
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z1:%.*]] = or i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z1]]
+;
+  %x1 = or i32 %x, 255
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = xor i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_and_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_and_eval(
+; CHECK-NEXT:    ret i32 0
+;
+  %x1 = or i32 %x, 34
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x2, %x1
+  %z = and i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_and_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_and_eval_vec(
+; CHECK-NEXT:    ret <2 x i32> zeroinitializer
+;
+  %x1 = or <2 x i32> %x, <i32 34, i32 34>
+  %x2 = add <2 x i32> %x1, <i32 -1, i32 -1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = and <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsmsk_and_eval2(i32 %x) {
+; CHECK-LABEL: @blsmsk_and_eval2(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 10
+; CHECK-NEXT:    [[X2:%.*]] = add i32 [[X1]], 63
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = and i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_and_eval3_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_and_eval3_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 34, i32 34>
+; CHECK-NEXT:    [[X2:%.*]] = add <2 x i32> [[X1]], <i32 63, i32 63>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[X3]], <i32 16, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 34, i32 34>
+  %x2 = add <2 x i32> %x1, <i32 -1, i32 -1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = and <2 x i32> %x3, <i32 16, i32 32>
+  ret <2 x i32> %z
+}
+
+define i1 @blsmsk_eq_is_false_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_eq_is_false_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp eq i32 [[X3]], 8
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %lb = and i32 %x, 4
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = add i32 %x, -1
+  %x3 = xor i32 %x2, %x
+  %z = icmp eq i32 %x3, 8
+  ret i1 %z
+}
+
+define i1 @blsmsk_gt_is_false_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_gt_is_false_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i32 [[X3]], 8
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %lb = and i32 %x, 2
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x2, %x
+  %z = icmp ugt i32 %x3, 8
+  ret i1 %z
+}
+
+define i32 @blsmsk_add_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_add_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 1
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x2, %x
+  %z = add i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_add_eval_assume_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_add_eval_assume_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i1>
+; CHECK-NEXT:    [[CMP0:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP0]])
+; CHECK-NEXT:    [[CMP1:%.*]] = extractelement <2 x i1> [[CMP]], i64 1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP1]])
+; CHECK-NEXT:    [[X2:%.*]] = add <2 x i32> [[X]], <i32 -1, i32 -1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = add <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %lb = and <2 x i32> %x, <i32 1, i32 1>
+  %cmp = icmp ne <2 x i32> %lb, <i32 0, i32 0>
+  %cmp0 = extractelement <2 x i1> %cmp, i32 0
+  call void @llvm.assume(i1 %cmp0)
+  %cmp1 = extractelement <2 x i1> %cmp, i32 1
+  call void @llvm.assume(i1 %cmp1)
+  %x2 = sub <2 x i32> %x, <i32 1, i32 1>
+  %x3 = xor <2 x i32> %x2, %x
+  %z = add <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsmsk_sub_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_sub_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], -32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 1
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x, %x2
+  %z = sub i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_or_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_or_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 1
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x2, %x
+  %z = or i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_and_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_and_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add i32 [[X]], 63
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 4
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x2, %x
+  %z = and i32 %x3, 32
+  ret i32 %z
+}
+
+
+define <2 x i1> @blsi_eq_is_false_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_eq_is_false_vec(
+; CHECK-NEXT:    ret <2 x i1> zeroinitializer
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 10>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = icmp eq <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define i1 @blsi_ne_is_true(i32 %x) {
+; CHECK-LABEL: @blsi_ne_is_true(
+; CHECK-NEXT:    ret i1 true
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x1, %x2
+  %z = icmp ne i32 %x3, 8
+  ret i1 %z
+}
+
+define <2 x i1> @blsi_ge_is_false_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_ge_is_false_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 10>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], <i32 7, i32 7>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 10>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = icmp uge <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define <2 x i1> @blsi_ge_is_false_diff_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_ge_is_false_diff_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 11>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], <i32 7, i32 7>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 11>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = icmp uge <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define i1 @blsi_gt_is_false(i32 %x) {
+; CHECK-LABEL: @blsi_gt_is_false(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 10
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i32 [[X3]], 8
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %x1 = or i32 %x, 10
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = icmp ugt i32 %x3, 8
+  ret i1 %z
+}
+
+
+define i32 @blsi_add_eval(i32 %x) {
+; CHECK-LABEL: @blsi_add_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 9
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 9
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = add i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsi_sub_eval(i32 %x) {
+; CHECK-LABEL: @blsi_sub_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 33
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], -32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 33
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = sub i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsi_sub_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_sub_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 33, i32 33>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add <2 x i32> [[X3]], <i32 -32, i32 -32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 33, i32 33>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = sub <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsi_or_eval(i32 %x) {
+; CHECK-LABEL: @blsi_or_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 129
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 129
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = or i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsi_xor_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_xor_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 33, i32 33>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z1:%.*]] = or <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z1]]
+;
+  %x1 = or <2 x i32> %x, <i32 33, i32 33>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x2, %x1
+  %z = xor <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsi_and_eval(i32 %x) {
+; CHECK-LABEL: @blsi_and_eval(
+; CHECK-NEXT:    ret i32 0
+;
+  %x1 = or i32 %x, 34
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = and i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsi_and_eval2_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_and_eval2_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 30, i32 30>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 30, i32 30>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = and <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsi_and_eval3(i32 %x) {
+; CHECK-LABEL: @blsi_and_eval3(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 34
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[X3]], 208
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 34
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = and i32 %x3, 240
+  ret i32 %z
+}
+
+define <2 x i1> @blsi_eq_is_false_assume_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_eq_is_false_assume_vec(
+; CHECK-NEXT:    [[LB:%.*]] = and <2 x i32> [[X:%.*]], <i32 2, i32 2>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[LB]], zeroinitializer
+; CHECK-NEXT:    [[CMP0:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP0]])
+; CHECK-NEXT:    [[CMP1:%.*]] = extractelement <2 x i1> [[CMP]], i64 1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP1]])
+; CHECK-NEXT:    [[X2:%.*]] = sub <2 x i32> zeroinitializer, [[X]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp eq <2 x i32> [[X3]], <i32 8, i32 8>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %lb = and <2 x i32> %x, <i32 2, i32 2>
+  %cmp = icmp ne <2 x i32> %lb, <i32 0, i32 0>
+  %cmp0 = extractelement <2 x i1> %cmp, i32 0
+  call void @llvm.assume(i1 %cmp0)
+  %cmp1 = extractelement <2 x i1> %cmp, i32 1
+  call void @llvm.assume(i1 %cmp1)
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x
+  %x3 = and <2 x i32> %x2, %x
+  %z = icmp eq <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define i1 @blsi_ne_is_true_assume(i32 %x) {
+; CHECK-LABEL: @blsi_ne_is_true_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ne i32 [[X3]], 8
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %lb = and i32 %x, 4
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 0, %x
+  %x3 = and i32 %x2, %x
+  %z = icmp ne i32 %x3, 8
+  ret i1 %z
+}
+
+define i1 @blsi_ge_is_false_assume(i32 %x) {
+; CHECK-LABEL: @blsi_ge_is_false_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i32 [[X3]], 7
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %lb = and i32 %x, 4
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 0, %x
+  %x3 = and i32 %x2, %x
+  %z = icmp uge i32 %x3, 8
+  ret i1 %z
+}
+
+define <2 x i1> @blsi_cmp_eq_diff_bits_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_cmp_eq_diff_bits_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 1, i32 2>
+; CHECK-NEXT:    [[X2:%.*]] = sub <2 x i32> zeroinitializer, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp eq <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 1, i32 2>
+  %x2 = sub <2 x i32> <i32 0, i32 0>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = icmp eq <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i1> %z
+}
+
+define i32 @blsi_xor_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsi_xor_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = xor i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 1
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 0, %x
+  %x3 = and i32 %x2, %x
+  %z = xor i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsi_and_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsi_and_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 8
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 0, %x
+  %x3 = and i32 %x2, %x
+  %z = and i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i1> @blsmsk_ne_no_proof_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_ne_no_proof_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 10, i32 10>
+; CHECK-NEXT:    [[X2:%.*]] = add nsw <2 x i32> [[X1]], <i32 -2, i32 -3>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ne <2 x i32> [[X3]], <i32 8, i32 8>
+; CHECK-NEXT:    ret <2 x i1> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 10, i32 10>
+  %x2 = sub <2 x i32> %x1, <i32 2, i32 3>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = icmp ne <2 x i32> %x3, <i32 8, i32 8>
+  ret <2 x i1> %z
+}
+
+define <2 x i32> @blsmsk_add_noeval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_add_noeval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 9, i32 9>
+; CHECK-NEXT:    [[X2:%.*]] = add <2 x i32> [[X1]], <i32 1, i32 -2>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = add <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 9, i32 9>
+  %x2 = add <2 x i32> %x1, <i32 1, i32 -2>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = add <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i1 @blsmsk_eq_no_proof(i32 %x) {
+; CHECK-LABEL: @blsmsk_eq_no_proof(
+; CHECK-NEXT:    ret i1 false
+;
+  %x1 = or i32 %x, 10
+  %x2 = add i32 %x1, -2
+  %x3 = xor i32 %x2, %x1
+  %z = icmp eq i32 %x3, 8
+  ret i1 %z
+}
+
+
+define i32 @blsmsk_add_no_eval(i32 %x) {
+; CHECK-LABEL: @blsmsk_add_no_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 9
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -3
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 9
+  %x2 = sub i32 %x1, 3
+  %x3 = xor i32 %x1, %x2
+  %z = add i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_add_no_eval2(i32 %x) {
+; CHECK-LABEL: @blsmsk_add_no_eval2(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 256
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X1]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = add i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 256
+  %x2 = sub i32 %x1, 1
+  %x3 = xor i32 %x1, %x2
+  %z = add i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsmsk_xor_no_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsmsk_xor_no_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 34, i32 34>
+; CHECK-NEXT:    [[X2:%.*]] = add <2 x i32> [[X1]], <i32 -9, i32 1>
+; CHECK-NEXT:    [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = xor <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 34, i32 34>
+  %x2 = add <2 x i32> %x1, <i32 -9, i32 1>
+  %x3 = xor <2 x i32> %x2, %x1
+  %z = xor <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+define i32 @blsmsk_xor_no_eval_assume(i32 %x) {
+; CHECK-LABEL: @blsmsk_xor_no_eval_assume(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = xor i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 1
+  %cmp = icmp ne i32 %lb, 1
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x, %x2
+  %z = xor i32 %x3, 32
+  ret i32 %z
+}
+
+define i32 @blsmsk_xor_no_eval_assume2(i32 %x) {
+; CHECK-LABEL: @blsmsk_xor_no_eval_assume2(
+; CHECK-NEXT:    [[LB:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[X2:%.*]] = add nsw i32 [[X]], -1
+; CHECK-NEXT:    [[X3:%.*]] = xor i32 [[X2]], [[X]]
+; CHECK-NEXT:    [[Z:%.*]] = xor i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %lb = and i32 %x, 128
+  %cmp = icmp ne i32 %lb, 0
+  call void @llvm.assume(i1 %cmp)
+  %x2 = sub i32 %x, 1
+  %x3 = xor i32 %x, %x2
+  %z = xor i32 %x3, 32
+  ret i32 %z
+}
+
+define i1 @blsi_ne_no_proof2(i32 %x) {
+; CHECK-LABEL: @blsi_ne_no_proof2(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 512
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 0, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = icmp ne i32 [[X3]], 8
+; CHECK-NEXT:    ret i1 [[Z]]
+;
+  %x1 = or i32 %x, 512
+  %x2 = sub i32 0, %x1
+  %x3 = and i32 %x2, %x1
+  %z = icmp ne i32 %x3, 8
+  ret i1 %z
+}
+
+
+define i32 @blsi_or_no_eval(i32 %x) {
+; CHECK-LABEL: @blsi_or_no_eval(
+; CHECK-NEXT:    [[X1:%.*]] = or i32 [[X:%.*]], 129
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw i32 2, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and i32 [[X2]], [[X1]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[X3]], 32
+; CHECK-NEXT:    ret i32 [[Z]]
+;
+  %x1 = or i32 %x, 129
+  %x2 = sub i32 2, %x1
+  %x3 = and i32 %x2, %x1
+  %z = or i32 %x3, 32
+  ret i32 %z
+}
+
+define <2 x i32> @blsi_or_no_partial_eval_vec(<2 x i32> %x) {
+; CHECK-LABEL: @blsi_or_no_partial_eval_vec(
+; CHECK-NEXT:    [[X1:%.*]] = or <2 x i32> [[X:%.*]], <i32 30, i32 30>
+; CHECK-NEXT:    [[X2:%.*]] = sub nsw <2 x i32> <i32 0, i32 1>, [[X1]]
+; CHECK-NEXT:    [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]]
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i32> [[X3]], <i32 32, i32 32>
+; CHECK-NEXT:    ret <2 x i32> [[Z]]
+;
+  %x1 = or <2 x i32> %x, <i32 30, i32 30>
+  %x2 = sub <2 x i32> <i32 0, i32 1>, %x1
+  %x3 = and <2 x i32> %x1, %x2
+  %z = or <2 x i32> %x3, <i32 32, i32 32>
+  ret <2 x i32> %z
+}
+
+
+;; Test that if we have different knowledge about lowbit of X/-X that we select the minimum.
+define i1 @blsi_differing_lowbits(i8 %x) {
+; CHECK-LABEL: @blsi_differing_lowbits(
+; CHECK-NEXT:    [[Y:%.*]] = or i8 [[X:%.*]], 8
+; CHECK-NEXT:    [[Z:%.*]] = sub nsw i8 0, [[Y]]
+; CHECK-NEXT:    [[LB:%.*]] = and i8 [[Z]], 2
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[O:%.*]] = and i8 [[Y]], [[Z]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[O]], 4
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %y = or i8 %x, 8
+  %z = sub i8 0, %y
+  %lb = and i8 %z, 2
+  %ne = icmp ne i8 %lb, 0
+  call void @llvm.assume(i1 %ne)
+  %o = and i8 %z, %y
+  %r = icmp eq i8 %o, 4
+  ret i1 %r
+}
+
+define i1 @blsi_differing_lowbits2(i8 %x) {
+; CHECK-LABEL: @blsi_differing_lowbits2(
+; CHECK-NEXT:    [[Z:%.*]] = sub nsw i8 0, [[X:%.*]]
+; CHECK-NEXT:    [[LB:%.*]] = and i8 [[Z]], 8
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[LB2:%.*]] = and i8 [[X]], 2
+; CHECK-NEXT:    [[NE2:%.*]] = icmp ne i8 [[LB2]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE2]])
+; CHECK-NEXT:    [[O:%.*]] = and i8 [[Z]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[O]], 4
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = sub i8 0, %x
+  %lb = and i8 %z, 8
+  %ne = icmp ne i8 %lb, 0
+  call void @llvm.assume(i1 %ne)
+  %lb2 = and i8 %x, 2
+  %ne2 = icmp ne i8 %lb2, 0
+  call void @llvm.assume(i1 %ne2)
+  %o = and i8 %z, %x
+  %r = icmp eq i8 %o, 4
+  ret i1 %r
+}
