[llvm] 5c3496f - [InstCombine] Check isGuaranteedNotToBeUndef in haveNoCommonBitsSetSpecialCases. (#74390)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 5 10:33:48 PST 2023


Author: Craig Topper
Date: 2023-12-05T10:33:44-08:00
New Revision: 5c3496ff33ce8e4cc6f8c18edd7ae5fc65d23fdf

URL: https://github.com/llvm/llvm-project/commit/5c3496ff33ce8e4cc6f8c18edd7ae5fc65d23fdf
DIFF: https://github.com/llvm/llvm-project/commit/5c3496ff33ce8e4cc6f8c18edd7ae5fc65d23fdf.diff

LOG: [InstCombine] Check isGuaranteedNotToBeUndef in haveNoCommonBitsSetSpecialCases. (#74390)

It's not safe for InstCombine to add disjoint metadata when converting
Add to Or unless the operands are guaranteed not to be undef.

I've added the noundef attribute to preserve existing test behavior.

Added: 
    

Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/test/Transforms/InstCombine/add.ll
    llvm/test/Transforms/InstCombine/and-or-not.ll
    llvm/test/Transforms/InstCombine/logical-select.ll
    llvm/test/Transforms/InstCombine/masked-merge-add.ll
    llvm/test/Transforms/InstCombine/masked-merge-or.ll
    llvm/test/Transforms/InstCombine/masked-merge-xor.ll
    llvm/test/Transforms/InstCombine/pr53357.ll
    llvm/unittests/Analysis/ValueTrackingTest.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index d8dd6b7dabfc1..a53f2f68cfab3 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -186,37 +186,45 @@ KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
       SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
 }
 
-static bool haveNoCommonBitsSetSpecialCases(const Value *LHS,
-                                            const Value *RHS) {
+static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
+                                            const SimplifyQuery &SQ) {
   // Look for an inverted mask: (X & ~M) op (Y & M).
   {
     Value *M;
     if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
-        match(RHS, m_c_And(m_Specific(M), m_Value())))
+        match(RHS, m_c_And(m_Specific(M), m_Value())) &&
+        isGuaranteedNotToBeUndef(M, SQ.AC, SQ.CxtI, SQ.DT))
       return true;
   }
 
   // X op (Y & ~X)
-  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())))
+  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) &&
+      isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
     return true;
 
   // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
   // for constant Y.
   Value *Y;
-  if (match(RHS, m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))))
+  if (match(RHS,
+            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
+      isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT) &&
+      isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
     return true;
 
   // Peek through extends to find a 'not' of the other side:
   // (ext Y) op ext(~Y)
   if (match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
-      match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))))
+      match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))) &&
+      isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
     return true;
 
   // Look for: (A & B) op ~(A | B)
   {
     Value *A, *B;
     if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
-        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
+        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))) &&
+        isGuaranteedNotToBeUndef(A, SQ.AC, SQ.CxtI, SQ.DT) &&
+        isGuaranteedNotToBeUndef(B, SQ.AC, SQ.CxtI, SQ.DT))
       return true;
   }
 
@@ -234,8 +242,8 @@ bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
   assert(LHS->getType()->isIntOrIntVectorTy() &&
          "LHS and RHS should be integers");
 
-  if (haveNoCommonBitsSetSpecialCases(LHS, RHS) ||
-      haveNoCommonBitsSetSpecialCases(RHS, LHS))
+  if (haveNoCommonBitsSetSpecialCases(LHS, RHS, SQ) ||
+      haveNoCommonBitsSetSpecialCases(RHS, LHS, SQ))
     return true;
 
   return KnownBits::haveNoCommonBitsSet(LHSCache.getKnownBits(SQ),

diff  --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 42558a6d98f8f..6242fc6f528a4 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -1521,7 +1521,7 @@ define i8 @add_like_or_disjoint(i8 %x) {
   ret i8 %r
 }
 
-define i8 @add_and_xor(i8 %x, i8 %y) {
+define i8 @add_and_xor(i8 noundef %x, i8 %y) {
 ; CHECK-LABEL: @add_and_xor(
 ; CHECK-NEXT:    [[ADD:%.*]] = or i8 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
@@ -1558,7 +1558,7 @@ define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
   ret i8 %add
 }
 
-define i8 @add_and_xor_commuted1(i8 %x, i8 %_y) {
+define i8 @add_and_xor_commuted1(i8 noundef %x, i8 %_y) {
 ; CHECK-LABEL: @add_and_xor_commuted1(
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
 ; CHECK-NEXT:    [[ADD:%.*]] = or i8 [[Y]], [[X:%.*]]
@@ -1571,7 +1571,7 @@ define i8 @add_and_xor_commuted1(i8 %x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_and_xor_commuted2(i8 %_x, i8 %y) {
+define i8 @add_and_xor_commuted2(i8 noundef %_x, i8 %y) {
 ; CHECK-LABEL: @add_and_xor_commuted2(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[ADD:%.*]] = or i8 [[X]], [[Y:%.*]]
@@ -1584,7 +1584,7 @@ define i8 @add_and_xor_commuted2(i8 %_x, i8 %y) {
   ret i8 %add
 }
 
-define i8 @add_and_xor_commuted3(i8 %_x, i8 %_y) {
+define i8 @add_and_xor_commuted3(i8 noundef %_x, i8 %_y) {
 ; CHECK-LABEL: @add_and_xor_commuted3(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1599,7 +1599,7 @@ define i8 @add_and_xor_commuted3(i8 %_x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_and_xor_extra_use(i8 %x, i8 %y) {
+define i8 @add_and_xor_extra_use(i8 noundef %x, i8 %y) {
 ; CHECK-LABEL: @add_and_xor_extra_use(
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use(i8 [[XOR]])
@@ -1616,7 +1616,7 @@ define i8 @add_and_xor_extra_use(i8 %x, i8 %y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_const(i8 %x) {
+define i8 @add_xor_and_const(i8 noundef %x) {
 ; CHECK-LABEL: @add_xor_and_const(
 ; CHECK-NEXT:    [[ADD:%.*]] = or i8 [[X:%.*]], 42
 ; CHECK-NEXT:    ret i8 [[ADD]]
@@ -1640,7 +1640,7 @@ define i8 @add_xor_and_const_wrong_const(i8 %x) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var(i8 %x, i8 %y) {
+define i8 @add_xor_and_var(i8 noundef %x, i8 noundef %y) {
 ; CHECK-LABEL: @add_xor_and_var(
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    call void @use(i8 [[AND]])
@@ -1684,7 +1684,7 @@ define i8 @add_xor_and_var_wrong_op2(i8 %x, i8 %y, i8 %z) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted1(i8 %x, i8 %y) {
+define i8 @add_xor_and_var_commuted1(i8 noundef %x, i8 noundef %y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted1(
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use(i8 [[AND]])
@@ -1698,7 +1698,7 @@ define i8 @add_xor_and_var_commuted1(i8 %x, i8 %y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted2(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted2(i8 noundef %_x, i8 noundef %_y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted2(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1716,7 +1716,7 @@ define i8 @add_xor_and_var_commuted2(i8 %_x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted3(i8 %x, i8 %_y) {
+define i8 @add_xor_and_var_commuted3(i8 noundef %x, i8 noundef %_y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted3(
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[Y]], [[X:%.*]]
@@ -1732,7 +1732,7 @@ define i8 @add_xor_and_var_commuted3(i8 %x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted4(i8 %_x, i8 %y) {
+define i8 @add_xor_and_var_commuted4(i8 noundef %_x, i8 noundef %y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted4(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[Y:%.*]]
@@ -1748,7 +1748,7 @@ define i8 @add_xor_and_var_commuted4(i8 %_x, i8 %y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted5(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted5(i8 noundef %_x, i8 noundef %_y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted5(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1766,7 +1766,7 @@ define i8 @add_xor_and_var_commuted5(i8 %_x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted6(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted6(i8 noundef %_x, i8 noundef %_y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted6(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1784,7 +1784,7 @@ define i8 @add_xor_and_var_commuted6(i8 %_x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_commuted7(i8 %_x, i8 %_y) {
+define i8 @add_xor_and_var_commuted7(i8 noundef %_x, i8 noundef %_y) {
 ; CHECK-LABEL: @add_xor_and_var_commuted7(
 ; CHECK-NEXT:    [[X:%.*]] = udiv i8 42, [[_X:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
@@ -1802,7 +1802,7 @@ define i8 @add_xor_and_var_commuted7(i8 %_x, i8 %_y) {
   ret i8 %add
 }
 
-define i8 @add_xor_and_var_extra_use(i8 %x, i8 %y) {
+define i8 @add_xor_and_var_extra_use(i8 noundef %x, i8 noundef %y) {
 ; CHECK-LABEL: @add_xor_and_var_extra_use(
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    call void @use(i8 [[AND]])
@@ -2583,7 +2583,7 @@ define <vscale x 1 x i32> @add_to_or_scalable(<vscale x 1 x i32> %in) {
   ret <vscale x 1 x i32> %add
 }
 
-define i5 @zext_zext_not(i3 %x) {
+define i5 @zext_zext_not(i3 noundef %x) {
 ; CHECK-LABEL: @zext_zext_not(
 ; CHECK-NEXT:    ret i5 7
 ;
@@ -2594,7 +2594,7 @@ define i5 @zext_zext_not(i3 %x) {
   ret i5 %r
 }
 
-define <2 x i5> @zext_zext_not_commute(<2 x i3> %x) {
+define <2 x i5> @zext_zext_not_commute(<2 x i3> noundef %x) {
 ; CHECK-LABEL: @zext_zext_not_commute(
 ; CHECK-NEXT:    ret <2 x i5> <i5 7, i5 7>
 ;
@@ -2605,7 +2605,7 @@ define <2 x i5> @zext_zext_not_commute(<2 x i3> %x) {
   ret <2 x i5> %r
 }
 
-define i9 @sext_sext_not(i3 %x) {
+define i9 @sext_sext_not(i3 noundef %x) {
 ; CHECK-LABEL: @sext_sext_not(
 ; CHECK-NEXT:    ret i9 -1
 ;
@@ -2616,7 +2616,7 @@ define i9 @sext_sext_not(i3 %x) {
   ret i9 %r
 }
 
-define i8 @sext_sext_not_commute(i3 %x) {
+define i8 @sext_sext_not_commute(i3 noundef %x) {
 ; CHECK-LABEL: @sext_sext_not_commute(
 ; CHECK-NEXT:    [[SX:%.*]] = sext i3 [[X:%.*]] to i8
 ; CHECK-NEXT:    call void @use(i8 [[SX]])
@@ -2631,7 +2631,7 @@ define i8 @sext_sext_not_commute(i3 %x) {
   ret i8 %r
 }
 
-define i5 @zext_sext_not(i4 %x) {
+define i5 @zext_sext_not(i4 noundef %x) {
 ; CHECK-LABEL: @zext_sext_not(
 ; CHECK-NEXT:    [[ZX:%.*]] = zext i4 [[X:%.*]] to i5
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i4 [[X]], -1
@@ -2646,7 +2646,7 @@ define i5 @zext_sext_not(i4 %x) {
   ret i5 %r
 }
 
-define i8 @zext_sext_not_commute(i4 %x) {
+define i8 @zext_sext_not_commute(i4 noundef %x) {
 ; CHECK-LABEL: @zext_sext_not_commute(
 ; CHECK-NEXT:    [[ZX:%.*]] = zext i4 [[X:%.*]] to i8
 ; CHECK-NEXT:    call void @use(i8 [[ZX]])
@@ -2665,7 +2665,7 @@ define i8 @zext_sext_not_commute(i4 %x) {
   ret i8 %r
 }
 
-define i9 @sext_zext_not(i4 %x) {
+define i9 @sext_zext_not(i4 noundef %x) {
 ; CHECK-LABEL: @sext_zext_not(
 ; CHECK-NEXT:    [[SX:%.*]] = sext i4 [[X:%.*]] to i9
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i4 [[X]], -1
@@ -2680,7 +2680,7 @@ define i9 @sext_zext_not(i4 %x) {
   ret i9 %r
 }
 
-define i9 @sext_zext_not_commute(i4 %x) {
+define i9 @sext_zext_not_commute(i4 noundef %x) {
 ; CHECK-LABEL: @sext_zext_not_commute(
 ; CHECK-NEXT:    [[SX:%.*]] = sext i4 [[X:%.*]] to i9
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i4 [[X]], -1

diff  --git a/llvm/test/Transforms/InstCombine/and-or-not.ll b/llvm/test/Transforms/InstCombine/and-or-not.ll
index c896c8f100380..ca093eba1b568 100644
--- a/llvm/test/Transforms/InstCombine/and-or-not.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-not.ll
@@ -548,7 +548,7 @@ define i32 @and_to_nxor_multiuse(float %fa, float %fb) {
 
 ; (a & b) | ~(a | b) --> ~(a ^ b)
 ; TODO: this increases instruction count if the pieces have additional users
-define i32 @or_to_nxor_multiuse(i32 %a, i32 %b) {
+define i32 @or_to_nxor_multiuse(i32 noundef %a, i32 noundef %b) {
 ; CHECK-LABEL: @or_to_nxor_multiuse(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[A]], [[B]]

diff  --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll
index 31848bc911892..c3eec5eebf2eb 100644
--- a/llvm/test/Transforms/InstCombine/logical-select.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select.ll
@@ -762,7 +762,7 @@ define <vscale x 2 x i64> @bitcast_vec_cond_scalable(<vscale x 16 x i1> %cond, <
 
 ; Negative test - bitcast of condition from wide source element type cannot be converted to select.
 
-define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> %cond, <8 x i3> %pc, <8 x i3> %d) {
+define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> noundef %cond, <8 x i3> %pc, <8 x i3> %d) {
 ; CHECK-LABEL: @bitcast_vec_cond_commute1(
 ; CHECK-NEXT:    [[C:%.*]] = mul <8 x i3> [[PC:%.*]], [[PC]]
 ; CHECK-NEXT:    [[S:%.*]] = sext <3 x i1> [[COND:%.*]] to <3 x i8>
@@ -830,7 +830,7 @@ define <2 x i16> @bitcast_vec_cond_commute3(<4 x i8> %cond, <2 x i16> %pc, <2 x
 
 ; Don't crash on invalid type for compute signbits.
 
-define <2 x i64> @bitcast_fp_vec_cond(<2 x double> %s, <2 x i64> %c, <2 x i64> %d) {
+define <2 x i64> @bitcast_fp_vec_cond(<2 x double> noundef %s, <2 x i64> %c, <2 x i64> %d) {
 ; CHECK-LABEL: @bitcast_fp_vec_cond(
 ; CHECK-NEXT:    [[T9:%.*]] = bitcast <2 x double> [[S:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[NOTT9:%.*]] = xor <2 x i64> [[T9]], <i64 -1, i64 -1>
@@ -849,7 +849,7 @@ define <2 x i64> @bitcast_fp_vec_cond(<2 x double> %s, <2 x i64> %c, <2 x i64> %
 
 ; Wider source type would be ok except poison could leak across elements.
 
-define <2 x i64> @bitcast_int_vec_cond(i1 %b, <2 x i64> %c, <2 x i64> %d) {
+define <2 x i64> @bitcast_int_vec_cond(i1 noundef %b, <2 x i64> %c, <2 x i64> %d) {
 ; CHECK-LABEL: @bitcast_int_vec_cond(
 ; CHECK-NEXT:    [[S:%.*]] = sext i1 [[B:%.*]] to i128
 ; CHECK-NEXT:    [[T9:%.*]] = bitcast i128 [[S]] to <2 x i64>

diff  --git a/llvm/test/Transforms/InstCombine/masked-merge-add.ll b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
index c4265eb896b20..f655153108a43 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-add.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
@@ -16,7 +16,7 @@
 ; Most basic positive tests
 ; ============================================================================ ;
 
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
 ; CHECK-LABEL: @p_splatvec(
 ; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
   ret <2 x i32> %ret
 }
 
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
 ; CHECK-LABEL: @p_vec_undef(
 ; CHECK-NEXT:    [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
 ; Used to make sure that the IR complexity sorting does not interfere.
 declare i32 @gen32()
 
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative0(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative1(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative2(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative3(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative4(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative5(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative6(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
 
 declare void @use32(i32)
 
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @n0_oneuse(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1

diff  --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
index 7d67b2d495412..b49ec07706e28 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
@@ -16,7 +16,7 @@
 ; Most basic positive tests
 ; ============================================================================ ;
 
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
 ; CHECK-LABEL: @p_splatvec(
 ; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
   ret <2 x i32> %ret
 }
 
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
 ; CHECK-LABEL: @p_vec_undef(
 ; CHECK-NEXT:    [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
 ; Used to make sure that the IR complexity sorting does not interfere.
 declare i32 @gen32()
 
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative0(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative1(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative2(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative3(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative4(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative5(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative6(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
 
 declare void @use32(i32)
 
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @n0_oneuse(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1

diff  --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
index 4f0845c8c8f46..a6d201be68cee 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
@@ -16,7 +16,7 @@
 ; Most basic positive tests
 ; ============================================================================ ;
 
-define i32 @p(i32 %x, i32 %y, i32 %m) {
+define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -31,7 +31,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
+define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
 ; CHECK-LABEL: @p_splatvec(
 ; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
@@ -46,7 +46,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
   ret <2 x i32> %ret
 }
 
-define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
+define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) {
 ; CHECK-LABEL: @p_vec_undef(
 ; CHECK-NEXT:    [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
@@ -180,7 +180,7 @@ define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
 ; Used to make sure that the IR complexity sorting does not interfere.
 declare i32 @gen32()
 
-define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative0(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -195,7 +195,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative1(i32 %x, i32 %m) {
+define i32 @p_commutative1(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative1(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -212,7 +212,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative2(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -227,7 +227,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative3(i32 %x, i32 %m) {
+define i32 @p_commutative3(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative3(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -244,7 +244,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
+define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative4(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1
@@ -259,7 +259,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative5(i32 %x, i32 %m) {
+define i32 @p_commutative5(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative5(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
@@ -276,7 +276,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
   ret i32 %ret
 }
 
-define i32 @p_commutative6(i32 %x, i32 %m) {
+define i32 @p_commutative6(i32 %x, i32 noundef %m) {
 ; CHECK-LABEL: @p_commutative6(
 ; CHECK-NEXT:    [[Y:%.*]] = call i32 @gen32()
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
@@ -314,7 +314,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
 
 declare void @use32(i32)
 
-define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
+define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
 ; CHECK-LABEL: @n0_oneuse(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
 ; CHECK-NEXT:    [[NEG:%.*]] = xor i32 [[M]], -1

diff  --git a/llvm/test/Transforms/InstCombine/pr53357.ll b/llvm/test/Transforms/InstCombine/pr53357.ll
index bdb63068fd4ae..0a6d2993ce46a 100644
--- a/llvm/test/Transforms/InstCombine/pr53357.ll
+++ b/llvm/test/Transforms/InstCombine/pr53357.ll
@@ -3,7 +3,7 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
 ; (x & y) + ~(x | y)
-define i32 @src(i32 %0, i32 %1) {
+define i32 @src(i32 noundef %0, i32 noundef %1) {
 ; CHECK-LABEL: @src(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -17,7 +17,7 @@ define i32 @src(i32 %0, i32 %1) {
 }
 
 ; vector version of src
-define <2 x i32> @src_vec(<2 x i32> %0, <2 x i32> %1) {
+define <2 x i32> @src_vec(<2 x i32> noundef %0, <2 x i32> noundef %1) {
 ; CHECK-LABEL: @src_vec(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor <2 x i32> [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor <2 x i32> [[TMP3]], <i32 -1, i32 -1>
@@ -31,7 +31,7 @@ define <2 x i32> @src_vec(<2 x i32> %0, <2 x i32> %1) {
 }
 
 ; vector version of src with undef values
-define <2 x i32> @src_vec_undef(<2 x i32> %0, <2 x i32> %1) {
+define <2 x i32> @src_vec_undef(<2 x i32> noundef %0, <2 x i32> noundef %1) {
 ; CHECK-LABEL: @src_vec_undef(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor <2 x i32> [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor <2 x i32> [[TMP3]], <i32 -1, i32 -1>
@@ -45,7 +45,7 @@ define <2 x i32> @src_vec_undef(<2 x i32> %0, <2 x i32> %1) {
 }
 
 ; (x & y) + ~(y | x)
-define i32 @src2(i32 %0, i32 %1) {
+define i32 @src2(i32 noundef %0, i32 noundef %1) {
 ; CHECK-LABEL: @src2(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -59,7 +59,7 @@ define i32 @src2(i32 %0, i32 %1) {
 }
 
 ; (x & y) + (~x & ~y)
-define i32 @src3(i32 %0, i32 %1) {
+define i32 @src3(i32 noundef %0, i32 noundef %1) {
 ; CHECK-LABEL: @src3(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -74,7 +74,7 @@ define i32 @src3(i32 %0, i32 %1) {
 }
 
 ; ~(x | y) + (y & x)
-define i32 @src4(i32 %0, i32 %1) {
+define i32 @src4(i32 noundef %0, i32 noundef %1) {
 ; CHECK-LABEL: @src4(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1
@@ -88,7 +88,7 @@ define i32 @src4(i32 %0, i32 %1) {
 }
 
 ; ~(x | y) + (x & y)
-define i32 @src5(i32 %0, i32 %1) {
+define i32 @src5(i32 noundef %0, i32 noundef %1) {
 ; CHECK-LABEL: @src5(
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1

diff  --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 1190550340efb..0d3a594da0c06 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2604,7 +2604,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
   {
     // Check for an inverted mask: (X & ~M) op (Y & M).
     auto M = parseModule(R"(
-  define i32 @test(i32 %X, i32 %Y, i32 %M) {
+  define i32 @test(i32 %X, i32 %Y, i32 noundef %M) {
     %1 = xor i32 %M, -1
     %LHS = and i32 %1, %X
     %RHS = and i32 %Y, %M
@@ -2623,7 +2623,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
   {
     // Check for (A & B) and ~(A | B)
     auto M = parseModule(R"(
-  define void @test(i32 %A, i32 %B) {
+  define void @test(i32 noundef %A, i32 noundef %B) {
     %LHS = and i32 %A, %B
     %or = or i32 %A, %B
     %RHS = xor i32 %or, -1
@@ -2651,7 +2651,7 @@ TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
   {
     // Check for (A & B) and ~(A | B) in vector version
     auto M = parseModule(R"(
-  define void @test(<2 x i32> %A, <2 x i32> %B) {
+  define void @test(<2 x i32> noundef %A, <2 x i32> noundef %B) {
     %LHS = and <2 x i32> %A, %B
     %or = or <2 x i32> %A, %B
     %RHS = xor <2 x i32> %or, <i32 -1, i32 -1>


        


More information about the llvm-commits mailing list