[llvm] [InstCombine] Remove some of the complexity-based canonicalization (PR #91185)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Wed May 8 23:21:38 PDT 2024
https://github.com/nikic updated https://github.com/llvm/llvm-project/pull/91185
>From 391f8d6b4e7d9d182fe36070ec10addf9db1a5ad Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Mon, 6 May 2024 19:05:21 +0900
Subject: [PATCH] [InstCombine] Remove some of the complexity-based
canonicalization
The idea behind this is that the canonicalization allows us to handle
fewer patterns, because we know that some will be canonicalized away.
This is indeed very useful to e.g. know that constants are always
on the right.
However, the fact that arguments are also canonicalized to the
right seems like it may be doing more harm than good: This means
that writing tests to cover both commuted forms requires special
care ("thwart complexity-based canonicalization").
I think we should consider dropping this canonicalization to make
testing simpler.
---
.../Transforms/InstCombine/InstCombiner.h | 25 +-
.../IndVarSimplify/rewrite-loop-exit-value.ll | 4 +-
...004-11-27-SetCCForCastLargerAndConstant.ll | 26 +-
.../InstCombine/2010-11-23-Distributed.ll | 2 +-
llvm/test/Transforms/InstCombine/abs-1.ll | 4 +-
.../Transforms/InstCombine/add-mask-neg.ll | 6 +-
llvm/test/Transforms/InstCombine/add.ll | 46 +--
llvm/test/Transforms/InstCombine/add2.ll | 2 +-
.../test/Transforms/InstCombine/add_or_sub.ll | 8 +-
.../InstCombine/and-or-icmp-const-icmp.ll | 74 ++---
.../Transforms/InstCombine/and-or-icmps.ll | 80 +++---
.../test/Transforms/InstCombine/and-or-not.ll | 10 +-
llvm/test/Transforms/InstCombine/and-or.ll | 26 +-
.../Transforms/InstCombine/and-xor-merge.ll | 26 +-
.../test/Transforms/InstCombine/and-xor-or.ll | 222 +++++++--------
llvm/test/Transforms/InstCombine/and.ll | 54 ++--
.../InstCombine/apint-and-xor-merge.ll | 2 +-
llvm/test/Transforms/InstCombine/apint-or.ll | 4 +-
.../Transforms/InstCombine/apint-shift.ll | 10 +-
llvm/test/Transforms/InstCombine/apint-sub.ll | 2 +-
.../Transforms/InstCombine/assume-align.ll | 2 +-
.../InstCombine/assume-separate_storage.ll | 2 +-
.../InstCombine/binop-and-shifts.ll | 38 +--
.../test/Transforms/InstCombine/binop-cast.ll | 8 +-
.../test/Transforms/InstCombine/bit-checks.ll | 58 ++--
.../InstCombine/bitcast-inseltpoison.ll | 8 +-
llvm/test/Transforms/InstCombine/bitcast.ll | 12 +-
.../test/Transforms/InstCombine/bitreverse.ll | 6 +-
.../test/Transforms/InstCombine/bswap-fold.ll | 10 +-
.../test/Transforms/InstCombine/call-guard.ll | 2 +-
...nt-low-bit-mask-and-icmp-eq-to-icmp-ule.ll | 2 +-
...nt-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll | 2 +-
...t-low-bit-mask-and-icmp-uge-to-icmp-ule.ll | 2 +-
...t-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll | 2 +-
...ze-low-bit-mask-and-icmp-eq-to-icmp-ule.ll | 12 +-
...ze-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll | 12 +-
...low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll | 20 +-
...low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll | 20 +-
...low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll | 6 +-
...low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll | 6 +-
...low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll | 12 +-
...low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll | 12 +-
.../Transforms/InstCombine/cast-mul-select.ll | 76 ++---
llvm/test/Transforms/InstCombine/cast.ll | 18 +-
llvm/test/Transforms/InstCombine/cast_phi.ll | 2 +-
.../Transforms/InstCombine/cmp-x-vs-neg-x.ll | 4 +-
.../InstCombine/conditional-negation.ll | 18 +-
.../test/Transforms/InstCombine/ctpop-cttz.ll | 2 +-
.../test/Transforms/InstCombine/ctpop-pow2.ll | 6 +-
llvm/test/Transforms/InstCombine/cttz.ll | 4 +-
llvm/test/Transforms/InstCombine/demorgan.ll | 24 +-
.../Transforms/InstCombine/dependent-ivs.ll | 4 +-
.../InstCombine/fadd-fsub-factor.ll | 38 +--
llvm/test/Transforms/InstCombine/fadd.ll | 10 +-
.../Transforms/InstCombine/fast-basictest.ll | 35 ++-
llvm/test/Transforms/InstCombine/fast-math.ll | 12 +-
llvm/test/Transforms/InstCombine/fcmp.ll | 30 +-
llvm/test/Transforms/InstCombine/fdiv-sqrt.ll | 6 +-
llvm/test/Transforms/InstCombine/fdiv.ll | 8 +-
.../InstCombine/float-shrink-compare.ll | 28 +-
llvm/test/Transforms/InstCombine/fmul.ll | 20 +-
...c-of-add-of-not-x-and-y-to-sub-x-from-y.ll | 6 +-
.../InstCombine/fold-select-fmul-if-zero.ll | 32 +--
.../InstCombine/fold-signbit-test-power2.ll | 2 +-
llvm/test/Transforms/InstCombine/fpextend.ll | 8 +-
llvm/test/Transforms/InstCombine/fptrunc.ll | 2 +-
.../Transforms/InstCombine/free-inversion.ll | 10 +-
llvm/test/Transforms/InstCombine/fsh.ll | 4 +-
llvm/test/Transforms/InstCombine/fsub.ll | 46 +--
llvm/test/Transforms/InstCombine/funnel.ll | 4 +-
.../Transforms/InstCombine/getelementptr.ll | 6 +-
.../hoist-negation-out-of-bias-calculation.ll | 16 +-
...hoist-xor-by-constant-from-xor-by-value.ll | 2 +-
llvm/test/Transforms/InstCombine/icmp-add.ll | 66 ++---
.../InstCombine/icmp-and-add-sub-xor-p2.ll | 24 +-
.../InstCombine/icmp-and-lowbit-mask.ll | 52 ++--
.../Transforms/InstCombine/icmp-and-shift.ll | 12 +-
.../Transforms/InstCombine/icmp-custom-dl.ll | 2 +-
.../InstCombine/icmp-equality-rotate.ll | 8 +-
.../InstCombine/icmp-equality-xor.ll | 2 +-
.../Transforms/InstCombine/icmp-ext-ext.ll | 26 +-
llvm/test/Transforms/InstCombine/icmp-gep.ll | 12 +-
.../Transforms/InstCombine/icmp-mul-zext.ll | 10 +-
llvm/test/Transforms/InstCombine/icmp-mul.ll | 8 +-
.../Transforms/InstCombine/icmp-of-and-x.ll | 12 +-
.../Transforms/InstCombine/icmp-of-or-x.ll | 8 +-
.../InstCombine/icmp-of-trunc-ext.ll | 46 +--
.../Transforms/InstCombine/icmp-of-xor-x.ll | 42 +--
llvm/test/Transforms/InstCombine/icmp-or.ll | 8 +-
.../test/Transforms/InstCombine/icmp-range.ll | 66 ++---
.../Transforms/InstCombine/icmp-rotate.ll | 2 +-
.../icmp-select-implies-common-op.ll | 24 +-
.../Transforms/InstCombine/icmp-select.ll | 8 +-
llvm/test/Transforms/InstCombine/icmp-sub.ll | 4 +-
...al-to-icmp-eq-of-lshr-val-by-bits-and-0.ll | 10 +-
...al-to-icmp-ne-of-lshr-val-by-bits-and-0.ll | 10 +-
llvm/test/Transforms/InstCombine/icmp.ll | 56 ++--
llvm/test/Transforms/InstCombine/implies.ll | 4 +-
...rt-variable-mask-in-masked-merge-scalar.ll | 2 +-
...rt-variable-mask-in-masked-merge-vector.ll | 2 +-
llvm/test/Transforms/InstCombine/ispow2.ll | 14 +-
.../test/Transforms/InstCombine/known-bits.ll | 4 +-
.../Transforms/InstCombine/known-never-nan.ll | 2 +-
llvm/test/Transforms/InstCombine/log-pow.ll | 6 +-
.../logical-select-inseltpoison.ll | 22 +-
.../Transforms/InstCombine/logical-select.ll | 50 ++--
.../InstCombine/lshr-and-negC-icmpeq-zero.ll | 2 +-
llvm/test/Transforms/InstCombine/lshr.ll | 2 +-
.../InstCombine/masked-merge-add.ll | 16 +-
.../InstCombine/masked-merge-and-of-ors.ll | 42 +--
.../Transforms/InstCombine/masked-merge-or.ll | 16 +-
.../InstCombine/masked-merge-xor.ll | 40 +--
.../Transforms/InstCombine/minmax-fold.ll | 10 +-
.../Transforms/InstCombine/minmax-of-xor-x.ll | 20 +-
.../Transforms/InstCombine/mul-masked-bits.ll | 6 +-
llvm/test/Transforms/InstCombine/mul-pow2.ll | 2 +-
llvm/test/Transforms/InstCombine/mul.ll | 8 +-
llvm/test/Transforms/InstCombine/mul_fold.ll | 12 +-
.../Transforms/InstCombine/mul_full_64.ll | 4 +-
llvm/test/Transforms/InstCombine/not-add.ll | 8 +-
llvm/test/Transforms/InstCombine/not.ll | 48 ++--
.../Transforms/InstCombine/onehot_merge.ll | 48 ++--
.../test/Transforms/InstCombine/or-xor-xor.ll | 4 +-
llvm/test/Transforms/InstCombine/or-xor.ll | 38 +--
llvm/test/Transforms/InstCombine/or.ll | 20 +-
...nput-masking-after-truncation-variant-b.ll | 6 +-
...dant-left-shift-input-masking-variant-b.ll | 2 +-
llvm/test/Transforms/InstCombine/phi.ll | 10 +-
llvm/test/Transforms/InstCombine/pr14365.ll | 4 +-
llvm/test/Transforms/InstCombine/pr44242.ll | 8 +-
llvm/test/Transforms/InstCombine/pr49688.ll | 4 +-
llvm/test/Transforms/InstCombine/pr75369.ll | 2 +-
.../InstCombine/ptr-int-ptr-icmp.ll | 14 +-
llvm/test/Transforms/InstCombine/ptrmask.ll | 22 +-
.../Transforms/InstCombine/range-check.ll | 48 ++--
.../Transforms/InstCombine/reassociate-nuw.ll | 8 +-
...nput-masking-after-truncation-variant-b.ll | 10 +-
...dant-left-shift-input-masking-variant-b.ll | 20 +-
llvm/test/Transforms/InstCombine/rem.ll | 22 +-
...f-negative-is-non-zero-and-no-underflow.ll | 36 +--
...ve-or-zero-is-non-zero-and-no-underflow.ll | 32 +--
...ult-of-usub-is-non-zero-and-no-overflow.ll | 56 ++--
.../InstCombine/saturating-add-sub.ll | 26 +-
.../InstCombine/scalarization-inseltpoison.ll | 12 +-
.../Transforms/InstCombine/scalarization.ll | 16 +-
.../Transforms/InstCombine/select-and-or.ll | 26 +-
.../InstCombine/select-binop-cmp.ll | 2 +-
.../select-binop-foldable-floating-point.ll | 24 +-
.../test/Transforms/InstCombine/select-cmp.ll | 42 +--
.../InstCombine/select-ctlz-to-cttz.ll | 12 +-
.../Transforms/InstCombine/select-divrem.ll | 2 +-
.../InstCombine/select-factorize.ll | 24 +-
.../InstCombine/select-masked_gather.ll | 2 +-
.../InstCombine/select-masked_load.ll | 2 +-
.../InstCombine/select-of-bittest.ll | 18 +-
.../InstCombine/select-safe-transforms.ll | 4 +-
.../InstCombine/select-with-bitwise-ops.ll | 86 +++---
llvm/test/Transforms/InstCombine/select.ll | 36 +--
.../Transforms/InstCombine/select_meta.ll | 10 +-
llvm/test/Transforms/InstCombine/set.ll | 4 +-
llvm/test/Transforms/InstCombine/shift-add.ll | 12 +-
...ciation-in-bittest-with-truncation-lshr.ll | 2 +-
...ociation-in-bittest-with-truncation-shl.ll | 8 +-
.../shift-direction-in-bit-test.ll | 4 +-
.../Transforms/InstCombine/shift-logic.ll | 6 +-
llvm/test/Transforms/InstCombine/shift.ll | 10 +-
llvm/test/Transforms/InstCombine/shl-bo.ll | 36 +--
.../Transforms/InstCombine/shuffle-binop.ll | 4 +-
.../InstCombine/signed-truncation-check.ll | 4 +-
.../InstCombine/simplify-demanded-fpclass.ll | 2 +-
.../InstCombine/sink-not-into-and.ll | 2 +-
.../InstCombine/sink-not-into-or.ll | 2 +-
llvm/test/Transforms/InstCombine/smax-icmp.ll | 8 +-
llvm/test/Transforms/InstCombine/smin-icmp.ll | 8 +-
.../InstCombine/sub-ashr-or-to-icmp-select.ll | 4 +-
llvm/test/Transforms/InstCombine/sub-gep.ll | 2 +-
.../InstCombine/sub-lshr-or-to-icmp-select.ll | 2 +-
.../test/Transforms/InstCombine/sub-minmax.ll | 10 +-
llvm/test/Transforms/InstCombine/sub-not.ll | 16 +-
.../sub-of-negatible-inseltpoison.ll | 16 +-
.../InstCombine/sub-of-negatible.ll | 18 +-
.../Transforms/InstCombine/sub-xor-cmp.ll | 8 +-
llvm/test/Transforms/InstCombine/sub.ll | 24 +-
.../Transforms/InstCombine/trunc-binop-ext.ll | 40 +--
llvm/test/Transforms/InstCombine/uaddo.ll | 20 +-
llvm/test/Transforms/InstCombine/umax-icmp.ll | 8 +-
llvm/test/Transforms/InstCombine/umin-icmp.ll | 8 +-
.../unordered-compare-and-ordered.ll | 8 +-
...gned-add-lack-of-overflow-check-via-add.ll | 2 +-
...gned-add-lack-of-overflow-check-via-xor.ll | 22 +-
.../unsigned-add-lack-of-overflow-check.ll | 12 +-
.../unsigned-add-overflow-check-via-add.ll | 4 +-
.../unsigned-add-overflow-check-via-xor.ll | 22 +-
.../unsigned-add-overflow-check.ll | 12 +-
.../unsigned-sub-lack-of-overflow-check.ll | 2 +-
.../unsigned-sub-overflow-check.ll | 2 +-
.../InstCombine/vec_demanded_elts.ll | 6 +-
.../InstCombine/vec_shuffle-inseltpoison.ll | 14 +-
.../Transforms/InstCombine/vec_shuffle.ll | 14 +-
.../Transforms/InstCombine/vector-reverse.ll | 2 +-
.../test/Transforms/InstCombine/vector-xor.ll | 8 +-
.../InstCombine/widenable-conditions.ll | 16 +-
llvm/test/Transforms/InstCombine/xor.ll | 30 +-
llvm/test/Transforms/InstCombine/xor2.ll | 32 +--
.../InstCombine/zext-bool-add-sub.ll | 16 +-
.../Transforms/InstCombine/zext-or-icmp.ll | 2 +-
llvm/test/Transforms/InstCombine/zext.ll | 8 +-
.../AArch64/deterministic-type-shrinkage.ll | 2 +-
.../AArch64/sve-cond-inv-loads.ll | 18 +-
.../AArch64/sve-gather-scatter.ll | 30 +-
.../LoopVectorize/AArch64/sve-inductions.ll | 2 +-
.../AArch64/sve-interleaved-accesses.ll | 2 +-
.../AArch64/sve-vector-reverse.ll | 136 ++++-----
.../LoopVectorize/AArch64/sve-widen-phi.ll | 134 ++++-----
.../AArch64/vector-reverse-mask4.ll | 10 +-
.../Transforms/LoopVectorize/ARM/mve-qabs.ll | 74 ++---
.../LoopVectorize/ARM/mve-reductions.ll | 14 +-
.../LoopVectorize/ARM/mve-selectandorcost.ll | 64 ++---
.../LoopVectorize/ARM/pointer_iv.ll | 264 +++++++++---------
.../ARM/tail-fold-multiple-icmps.ll | 36 +--
.../X86/invariant-load-gather.ll | 4 +-
.../X86/invariant-store-vectorization.ll | 20 +-
.../LoopVectorize/extract-last-veclane.ll | 4 +-
.../LoopVectorize/float-induction.ll | 60 ++--
.../LoopVectorize/if-conversion-nest.ll | 20 +-
.../Transforms/LoopVectorize/induction.ll | 174 ++++++------
.../LoopVectorize/interleaved-accesses.ll | 84 +++---
.../invariant-store-vectorization-2.ll | 14 +-
.../invariant-store-vectorization.ll | 28 +-
.../LoopVectorize/reduction-inloop-cond.ll | 18 +-
.../LoopVectorize/reduction-inloop.ll | 26 +-
.../Transforms/LoopVectorize/reduction.ll | 22 +-
.../Transforms/LoopVectorize/runtime-check.ll | 8 +-
.../LoopVectorize/scalable-inductions.ll | 8 +-
.../uniform-args-call-variants.ll | 4 +-
llvm/test/Transforms/PGOProfile/chr.ll | 14 +-
.../AArch64/hoist-runtime-checks.ll | 8 +-
...ting-sinking-required-for-vectorization.ll | 14 +-
...ple-unreachable-exits-for-vectorization.ll | 182 +++---------
.../PhaseOrdering/AArch64/quant_4x4.ll | 8 +-
.../PhaseOrdering/ARM/arm_mult_q15.ll | 54 ++--
.../X86/hoist-load-of-baseptr.ll | 4 +-
.../PhaseOrdering/X86/speculation-vs-tbaa.ll | 2 +-
.../PhaseOrdering/fast-basictest.ll | 2 +-
.../PhaseOrdering/reassociate-instcombine.ll | 4 +-
.../PhaseOrdering/runtime-check-removal.ll | 2 +-
.../Reassociate/fast-ArrayOutOfBounds.ll | 12 +-
.../Reassociate/fast-SubReassociate.ll | 6 +-
.../X86/cmp_commute-inseltpoison.ll | 52 ++--
.../SLPVectorizer/X86/cmp_commute.ll | 52 ++--
250 files changed, 2552 insertions(+), 2654 deletions(-)
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index 855d1aeddfaee..6536f1c6b2d8a 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -132,21 +132,18 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
/// This routine maps IR values to various complexity ranks:
/// 0 -> undef
/// 1 -> Constants
- /// 2 -> Other non-instructions
- /// 3 -> Arguments
- /// 4 -> Cast and (f)neg/not instructions
- /// 5 -> Other instructions
+ /// 2 -> Cast and (f)neg/not instructions
+ /// 3 -> Other instructions and arguments
static unsigned getComplexity(Value *V) {
- if (isa<Instruction>(V)) {
- if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
- match(V, m_Not(PatternMatch::m_Value())) ||
- match(V, m_FNeg(PatternMatch::m_Value())))
- return 4;
- return 5;
- }
- if (isa<Argument>(V))
- return 3;
- return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
+ if (isa<Constant>(V))
+ return isa<UndefValue>(V) ? 0 : 1;
+
+ if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
+ match(V, m_Not(PatternMatch::m_Value())) ||
+ match(V, m_FNeg(PatternMatch::m_Value())))
+ return 2;
+
+ return 3;
}
/// Predicate canonicalization reduces the number of patterns that need to be
diff --git a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll
index 0d3d2425806ff..c1ef09785454e 100644
--- a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll
+++ b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll
@@ -165,8 +165,8 @@ define i16 @pr57336(i16 %end, i16 %m) mustprogress {
; CHECK: for.body:
; CHECK-NEXT: [[INC8:%.*]] = phi i16 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[INC]] = add nuw nsw i16 [[INC8]], 1
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i16 [[INC8]], [[M:%.*]]
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i16 [[MUL]], [[END:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i16 [[M:%.*]], [[INC8]]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i16 [[END:%.*]], [[MUL]]
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[CRIT_EDGE:%.*]], label [[FOR_BODY]]
; CHECK: crit_edge:
; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[END]], 1
diff --git a/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll b/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
index 68444db15d12a..045dc126addde 100644
--- a/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
+++ b/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
@@ -271,7 +271,7 @@ define i1 @gt_unsigned_to_small_negative(i8 %SB) {
define i1 @different_size_zext_zext_ugt(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_zext_zext_ugt(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i7 %x to i25
@@ -283,7 +283,7 @@ define i1 @different_size_zext_zext_ugt(i7 %x, i4 %y) {
define <2 x i1> @different_size_zext_zext_ugt_commute(<2 x i4> %x, <2 x i7> %y) {
; CHECK-LABEL: @different_size_zext_zext_ugt_commute(
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i4> [[X:%.*]] to <2 x i7>
-; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i7> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i7> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%zx = zext <2 x i4> %x to <2 x i25>
@@ -295,7 +295,7 @@ define <2 x i1> @different_size_zext_zext_ugt_commute(<2 x i4> %x, <2 x i7> %y)
define i1 @different_size_zext_zext_ult(i4 %x, i7 %y) {
; CHECK-LABEL: @different_size_zext_zext_ult(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[X:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i4 %x to i25
@@ -307,7 +307,7 @@ define i1 @different_size_zext_zext_ult(i4 %x, i7 %y) {
define i1 @different_size_zext_zext_eq(i4 %x, i7 %y) {
; CHECK-LABEL: @different_size_zext_zext_eq(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[X:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i4 %x to i25
@@ -319,7 +319,7 @@ define i1 @different_size_zext_zext_eq(i4 %x, i7 %y) {
define i1 @different_size_zext_zext_ne_commute(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_zext_zext_ne_commute(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp ne i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i7 %x to i25
@@ -331,7 +331,7 @@ define i1 @different_size_zext_zext_ne_commute(i7 %x, i4 %y) {
define i1 @different_size_zext_zext_slt(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_zext_zext_slt(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i7 %x to i25
@@ -343,7 +343,7 @@ define i1 @different_size_zext_zext_slt(i7 %x, i4 %y) {
define i1 @different_size_zext_zext_sgt(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_zext_zext_sgt(
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zx = zext i7 %x to i25
@@ -355,7 +355,7 @@ define i1 @different_size_zext_zext_sgt(i7 %x, i4 %y) {
define i1 @different_size_sext_sext_sgt(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_sext_sext_sgt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp slt i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
@@ -367,7 +367,7 @@ define i1 @different_size_sext_sext_sgt(i7 %x, i4 %y) {
define i1 @different_size_sext_sext_sle(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_sext_sext_sle(
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp sge i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
@@ -379,7 +379,7 @@ define i1 @different_size_sext_sext_sle(i7 %x, i4 %y) {
define i1 @different_size_sext_sext_eq(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_sext_sext_eq(
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
@@ -391,7 +391,7 @@ define i1 @different_size_sext_sext_eq(i7 %x, i4 %y) {
define i1 @different_size_sext_sext_ule(i7 %x, i4 %y) {
; CHECK-LABEL: @different_size_sext_sext_ule(
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
@@ -422,7 +422,7 @@ define i1 @different_size_sext_sext_ule_extra_use1(i7 %x, i4 %y) {
; CHECK-NEXT: [[SY:%.*]] = sext i4 [[Y:%.*]] to i25
; CHECK-NEXT: call void @use(i25 [[SY]])
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
@@ -437,7 +437,7 @@ define i1 @different_size_sext_sext_ule_extra_use2(i7 %x, i4 %y) {
; CHECK-NEXT: [[SX:%.*]] = sext i7 [[X:%.*]] to i25
; CHECK-NEXT: call void @use(i25 [[SX]])
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7
-; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%sx = sext i7 %x to i25
diff --git a/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll b/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll
index 70fd7274f35d4..45564cd9d95f3 100644
--- a/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll
+++ b/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll
@@ -16,7 +16,7 @@ define i32 @foo(i32 %x, i32 %y) {
define i1 @bar(i64 %x, i64 %y) {
; CHECK-LABEL: @bar(
; CHECK-NEXT: [[Y1:%.*]] = xor i64 [[X:%.*]], -1
-; CHECK-NEXT: [[B:%.*]] = and i64 [[Y1]], [[Y:%.*]]
+; CHECK-NEXT: [[B:%.*]] = and i64 [[Y:%.*]], [[Y1]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[B]], 0
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/abs-1.ll b/llvm/test/Transforms/InstCombine/abs-1.ll
index 32bd7a37053ed..1569e9707b793 100644
--- a/llvm/test/Transforms/InstCombine/abs-1.ll
+++ b/llvm/test/Transforms/InstCombine/abs-1.ll
@@ -306,7 +306,7 @@ define i32 @nabs_canonical_9(i32 %a, i32 %b) {
; CHECK-LABEL: @nabs_canonical_9(
; CHECK-NEXT: [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[A]], [[TMP1]]
; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[B]], [[TMP2]]
; CHECK-NEXT: ret i32 [[ADD]]
;
@@ -417,7 +417,7 @@ declare void @extra_use_i1(i1)
define i8 @shifty_abs_too_many_uses(i8 %x) {
; CHECK-LABEL: @shifty_abs_too_many_uses(
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SIGNBIT]], [[X]]
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X]], [[SIGNBIT]]
; CHECK-NEXT: [[ABS:%.*]] = xor i8 [[ADD]], [[SIGNBIT]]
; CHECK-NEXT: call void @extra_use(i8 [[SIGNBIT]])
; CHECK-NEXT: ret i8 [[ABS]]
diff --git a/llvm/test/Transforms/InstCombine/add-mask-neg.ll b/llvm/test/Transforms/InstCombine/add-mask-neg.ll
index 0e579f3097607..b72f051a0b799 100644
--- a/llvm/test/Transforms/InstCombine/add-mask-neg.ll
+++ b/llvm/test/Transforms/InstCombine/add-mask-neg.ll
@@ -49,7 +49,7 @@ define i32 @dec_commute_mask_neg_i32(i32 %X) {
define i32 @dec_mask_neg_multiuse_i32(i32 %X) {
; CHECK-LABEL: @dec_mask_neg_multiuse_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[MASK:%.*]] = and i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[MASK:%.*]] = and i32 [[X]], [[NEG]]
; CHECK-NEXT: [[DEC:%.*]] = add i32 [[MASK]], -1
; CHECK-NEXT: call void @use(i32 [[NEG]])
; CHECK-NEXT: ret i32 [[DEC]]
@@ -64,7 +64,7 @@ define i32 @dec_mask_neg_multiuse_i32(i32 %X) {
define i32 @dec_mask_multiuse_neg_i32(i32 %X) {
; CHECK-LABEL: @dec_mask_multiuse_neg_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[MASK:%.*]] = and i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[MASK:%.*]] = and i32 [[X]], [[NEG]]
; CHECK-NEXT: [[DEC:%.*]] = add i32 [[MASK]], -1
; CHECK-NEXT: call void @use(i32 [[MASK]])
; CHECK-NEXT: ret i32 [[DEC]]
@@ -105,7 +105,7 @@ define <2 x i32> @dec_mask_neg_v2i32_poison(<2 x i32> %X) {
define <2 x i32> @dec_mask_multiuse_neg_multiuse_v2i32(<2 x i32> %X) {
; CHECK-LABEL: @dec_mask_multiuse_neg_multiuse_v2i32(
; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i32> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[MASK:%.*]] = and <2 x i32> [[NEG]], [[X]]
+; CHECK-NEXT: [[MASK:%.*]] = and <2 x i32> [[X]], [[NEG]]
; CHECK-NEXT: [[DEC:%.*]] = add <2 x i32> [[MASK]], <i32 -1, i32 -1>
; CHECK-NEXT: call void @usev(<2 x i32> [[NEG]])
; CHECK-NEXT: call void @usev(<2 x i32> [[MASK]])
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 25087fef68a11..5b6647fa0d496 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -122,7 +122,7 @@ define i32 @test5(i32 %A, i32 %B) {
define i32 @test5_both_nsw(i32 %A, i32 %B) {
; CHECK-LABEL: @test5_both_nsw(
-; CHECK-NEXT: [[D:%.*]] = sub nsw i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = sub i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: ret i32 [[D]]
;
%C = sub nsw i32 0, %A
@@ -222,7 +222,7 @@ define i32 @test9(i32 %A) {
define i1 @test10(i8 %a, i8 %b) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[ADD:%.*]] = sub i8 0, [[B:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ADD]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A:%.*]], [[ADD]]
; CHECK-NEXT: ret i1 [[C]]
;
%add = add i8 %a, %b
@@ -233,7 +233,7 @@ define i1 @test10(i8 %a, i8 %b) {
define <2 x i1> @test10vec(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test10vec(
; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> zeroinitializer, [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <2 x i1> [[D]]
;
%c = add <2 x i8> %a, %b
@@ -264,7 +264,7 @@ define <2 x i1> @test11vec(<2 x i8> %a) {
define i8 @reassoc_shl1(i8 %x, i8 %y) {
; CHECK-LABEL: @reassoc_shl1(
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i8 [[X:%.*]], 1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[REASS_ADD]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[REASS_ADD]]
; CHECK-NEXT: ret i8 [[R]]
;
%a = add i8 %y, %x
@@ -275,7 +275,7 @@ define i8 @reassoc_shl1(i8 %x, i8 %y) {
define <2 x i8> @reassoc_shl1_commute1(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @reassoc_shl1_commute1(
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl <2 x i8> [[X:%.*]], <i8 1, i8 1>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[REASS_ADD]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[REASS_ADD]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = add <2 x i8> %x, %y
@@ -1221,7 +1221,7 @@ define <2 x i32> @test44_vec_non_splat(<2 x i32> %A) {
define i32 @lshr_add(i1 %x, i1 %y) {
; CHECK-LABEL: @lshr_add(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true
-; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -1235,7 +1235,7 @@ define i32 @lshr_add(i1 %x, i1 %y) {
define i5 @and_add(i1 %x, i1 %y) {
; CHECK-LABEL: @and_add(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true
-; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0
; CHECK-NEXT: ret i5 [[R]]
;
@@ -1249,7 +1249,7 @@ define i5 @and_add(i1 %x, i1 %y) {
define <2 x i8> @ashr_add_commute(<2 x i1> %x, <2 x i1> %y) {
; CHECK-LABEL: @ashr_add_commute(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[X:%.*]], <i1 true, i1 true>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i1> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i1> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[TMP3]]
;
@@ -1548,7 +1548,7 @@ define i8 @add_and_xor_wrong_const(i8 %x, i8 %y) {
define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @add_and_xor_wrong_op(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Z:%.*]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[XOR]]
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[AND]], [[X:%.*]]
; CHECK-NEXT: ret i8 [[ADD]]
;
@@ -1603,7 +1603,7 @@ define i8 @add_and_xor_extra_use(i8 noundef %x, i8 %y) {
; CHECK-LABEL: @add_and_xor_extra_use(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use(i8 [[XOR]])
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[XOR]]
; CHECK-NEXT: call void @use(i8 [[AND]])
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
; CHECK-NEXT: ret i8 [[ADD]]
@@ -1848,7 +1848,7 @@ define i32 @add_add_add_commute1(i32 %A, i32 %B, i32 %C, i32 %D) {
define i32 @add_add_add_commute2(i32 %A, i32 %B, i32 %C, i32 %D) {
; CHECK-LABEL: @add_add_add_commute2(
; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
+; CHECK-NEXT: [[F:%.*]] = add i32 [[C:%.*]], [[E]]
; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
; CHECK-NEXT: ret i32 [[G]]
;
@@ -1861,8 +1861,8 @@ define i32 @add_add_add_commute2(i32 %A, i32 %B, i32 %C, i32 %D) {
define i32 @add_add_add_commute3(i32 %A, i32 %B, i32 %C, i32 %D) {
; CHECK-LABEL: @add_add_add_commute3(
; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
-; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
+; CHECK-NEXT: [[F:%.*]] = add i32 [[C:%.*]], [[E]]
+; CHECK-NEXT: [[G:%.*]] = add i32 [[D:%.*]], [[F]]
; CHECK-NEXT: ret i32 [[G]]
;
%E = add i32 %B, %A
@@ -1876,7 +1876,7 @@ define i32 @add_add_add_commute3(i32 %A, i32 %B, i32 %C, i32 %D) {
define i8 @mul_add_common_factor_commute1(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_add_common_factor_commute1(
; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y:%.*]], 1
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[X1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[X1]]
; CHECK-NEXT: ret i8 [[A]]
;
%m = mul nsw i8 %x, %y
@@ -1970,7 +1970,7 @@ define i8 @not_mul_wrong_op(i8 %x, i8 %y) {
; CHECK-LABEL: @not_mul_wrong_op(
; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
-; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[Y:%.*]], [[NOT]]
; CHECK-NEXT: ret i8 [[PLUSX]]
;
%mul = mul i8 %x, 42
@@ -1986,7 +1986,7 @@ define i8 @not_mul_use1(i8 %x) {
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i8 [[X:%.*]], 42
; CHECK-NEXT: call void @use(i8 [[MUL]])
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
-; CHECK-NEXT: [[PLUSX:%.*]] = add nsw i8 [[NOT]], [[X]]
+; CHECK-NEXT: [[PLUSX:%.*]] = add nsw i8 [[X]], [[NOT]]
; CHECK-NEXT: ret i8 [[PLUSX]]
;
%mul = mul nsw i8 %x, 42
@@ -2003,7 +2003,7 @@ define i8 @not_mul_use2(i8 %x) {
; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
; CHECK-NEXT: call void @use(i8 [[NOT]])
-; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[X]]
+; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[X]], [[NOT]]
; CHECK-NEXT: ret i8 [[PLUSX]]
;
%mul = mul i8 %x, 42
@@ -3287,7 +3287,7 @@ define i32 @add_reduce_sqr_sum_flipped(i32 %a, i32 %b) {
define i32 @add_reduce_sqr_sum_flipped2(i32 %a, i32 %bx) {
; CHECK-LABEL: @add_reduce_sqr_sum_flipped2(
; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]]
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: ret i32 [[ADD]]
;
@@ -3347,7 +3347,7 @@ define i32 @add_reduce_sqr_sum_order2_flipped(i32 %a, i32 %b) {
define i32 @add_reduce_sqr_sum_order2_flipped2(i32 %a, i32 %bx) {
; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped2(
; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]]
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: ret i32 [[AB2]]
;
@@ -3364,7 +3364,7 @@ define i32 @add_reduce_sqr_sum_order2_flipped2(i32 %a, i32 %bx) {
define i32 @add_reduce_sqr_sum_order2_flipped3(i32 %a, i32 %bx) {
; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped3(
; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]]
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: ret i32 [[AB2]]
;
@@ -3561,7 +3561,7 @@ define i32 @add_reduce_sqr_sum_order5_flipped2(i32 %a, i32 %b) {
define i32 @add_reduce_sqr_sum_order5_flipped3(i32 %ax, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped3(
; CHECK-NEXT: [[A:%.*]] = xor i32 [[AX:%.*]], 42
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A]]
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: ret i32 [[AB2]]
;
@@ -3936,7 +3936,7 @@ define i32 @add_reduce_sqr_sum_varB_invalid3(i32 %a, i32 %b) {
; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1
; CHECK-NEXT: [[B_SQ1:%.*]] = add i32 [[A]], [[B]]
-; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[B_SQ1]], [[B]]
+; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[B]], [[B_SQ1]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
@@ -3954,7 +3954,7 @@ define i32 @add_reduce_sqr_sum_varB_invalid4(i32 %a, i32 %b) {
; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1
; CHECK-NEXT: [[NOT_B_SQ1:%.*]] = add i32 [[A]], [[B]]
-; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[NOT_B_SQ1]], [[A]]
+; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[A]], [[NOT_B_SQ1]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
diff --git a/llvm/test/Transforms/InstCombine/add2.ll b/llvm/test/Transforms/InstCombine/add2.ll
index 9ebcdac77179e..ae80ab2e92ad1 100644
--- a/llvm/test/Transforms/InstCombine/add2.ll
+++ b/llvm/test/Transforms/InstCombine/add2.ll
@@ -452,7 +452,7 @@ define i8 @add_of_mul(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @add_of_mul(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MB1:%.*]] = add i8 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[SUM:%.*]] = mul i8 [[MB1]], [[X:%.*]]
+; CHECK-NEXT: [[SUM:%.*]] = mul i8 [[X:%.*]], [[MB1]]
; CHECK-NEXT: ret i8 [[SUM]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/add_or_sub.ll b/llvm/test/Transforms/InstCombine/add_or_sub.ll
index 5f1234618b9a6..ef44f036b71fa 100644
--- a/llvm/test/Transforms/InstCombine/add_or_sub.ll
+++ b/llvm/test/Transforms/InstCombine/add_or_sub.ll
@@ -103,7 +103,7 @@ define i12 @add_or_sub_comb_i12_multiuse_only_sub(i12 %p) {
define i8 @add_or_sub_comb_i8_negative_y_sub(i8 %x, i8 %y) {
; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_sub(
; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X:%.*]], [[SUB]]
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[X]]
; CHECK-NEXT: ret i8 [[ADD]]
;
@@ -116,7 +116,7 @@ define i8 @add_or_sub_comb_i8_negative_y_sub(i8 %x, i8 %y) {
define i8 @add_or_sub_comb_i8_negative_y_or(i8 %x, i8 %y) {
; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_or(
; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[SUB]]
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[X]]
; CHECK-NEXT: ret i8 [[ADD]]
;
@@ -129,7 +129,7 @@ define i8 @add_or_sub_comb_i8_negative_y_or(i8 %x, i8 %y) {
define i8 @add_or_sub_comb_i8_negative_y_add(i8 %x, i8 %y) {
; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_add(
; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X]], [[SUB]]
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[ADD]]
;
@@ -142,7 +142,7 @@ define i8 @add_or_sub_comb_i8_negative_y_add(i8 %x, i8 %y) {
define i8 @add_or_sub_comb_i8_negative_xor_instead_or(i8 %x) {
; CHECK-LABEL: @add_or_sub_comb_i8_negative_xor_instead_or(
; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[X]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[SUB]]
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[XOR]], [[X]]
; CHECK-NEXT: ret i8 [[ADD]]
;
diff --git a/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll b/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll
index 9143a2d0ccda2..de5de37fe2df6 100644
--- a/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll
@@ -13,8 +13,8 @@
; Basic tests
; ==============================================================================
define i1 @eq_basic(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @eq_basic
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @eq_basic(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -1
; CHECK-NEXT: [[OR:%.*]] = icmp uge i8 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[OR]]
@@ -26,8 +26,8 @@ define i1 @eq_basic(i8 %x, i8 %y) {
}
define i1 @ne_basic_equal_5(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @ne_basic_equal_5
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @ne_basic_equal_5(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -6
; CHECK-NEXT: [[AND:%.*]] = icmp ult i8 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[AND]]
@@ -40,8 +40,8 @@ define i1 @ne_basic_equal_5(i8 %x, i8 %y) {
}
define i1 @eq_basic_equal_minus_1(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @eq_basic_equal_minus_1
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @eq_basic_equal_minus_1(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[OR:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -53,8 +53,8 @@ define i1 @eq_basic_equal_minus_1(i8 %x, i8 %y) {
}
define i1 @ne_basic_equal_minus_7(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @ne_basic_equal_minus_7
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @ne_basic_equal_minus_7(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 6
; CHECK-NEXT: [[AND:%.*]] = icmp ult i8 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[AND]]
@@ -67,8 +67,8 @@ define i1 @ne_basic_equal_minus_7(i8 %x, i8 %y) {
}
define i1 @eq_basic_unequal(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @eq_basic_unequal
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @eq_basic_unequal(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X]], -5
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[X]], 6
; CHECK-NEXT: [[C2:%.*]] = icmp ugt i8 [[SUB]], [[Y]]
@@ -83,8 +83,8 @@ define i1 @eq_basic_unequal(i8 %x, i8 %y) {
}
define i1 @ne_basic_unequal(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @ne_basic_unequal
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @ne_basic_unequal(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X]], 7
; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 [[X]], -4
; CHECK-NEXT: [[C2:%.*]] = icmp ule i8 [[ADD]], [[Y]]
@@ -102,8 +102,8 @@ define i1 @ne_basic_unequal(i8 %x, i8 %y) {
; Tests with multiple uses
; ==============================================================================
define i1 @eq_multi_c1(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @eq_multi_c1
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @eq_multi_c1(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[X]], 0
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -1
; CHECK-NEXT: [[OR:%.*]] = icmp uge i8 [[TMP1]], [[Y]]
@@ -118,8 +118,8 @@ define i1 @eq_multi_c1(i8 %x, i8 %y) {
}
define i1 @ne_multi_c2(i8 %x, i8 %y) {
-; CHECK-LABEL: define i1 @ne_multi_c2
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-LABEL: define i1 @ne_multi_c2(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) {
; CHECK-NEXT: [[C2:%.*]] = icmp ule i8 [[X]], [[Y]]
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -1
; CHECK-NEXT: [[AND:%.*]] = icmp ult i8 [[TMP1]], [[Y]]
@@ -137,8 +137,8 @@ define i1 @ne_multi_c2(i8 %x, i8 %y) {
; Tests with vector types
; ==============================================================================
define <2 x i1> @eq_vector(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @eq_vector
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @eq_vector(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -1, i8 -1>
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
@@ -150,8 +150,8 @@ define <2 x i1> @eq_vector(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @ne_vector_equal_5(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @ne_vector_equal_5
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @ne_vector_equal_5(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
; CHECK-NEXT: [[AND:%.*]] = icmp ult <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[AND]]
@@ -164,8 +164,8 @@ define <2 x i1> @ne_vector_equal_5(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @eq_vector_equal_minus_1(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @eq_vector_equal_minus_1
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @eq_vector_equal_minus_1(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[X]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
@@ -177,8 +177,8 @@ define <2 x i1> @eq_vector_equal_minus_1(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @ne_vector_equal_minus_7(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @ne_vector_equal_minus_7
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @ne_vector_equal_minus_7(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 6, i8 6>
; CHECK-NEXT: [[AND:%.*]] = icmp ult <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[AND]]
@@ -191,8 +191,8 @@ define <2 x i1> @ne_vector_equal_minus_7(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @eq_vector_unequal1(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @eq_vector_unequal1
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @eq_vector_unequal1(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X]], <i8 -5, i8 -5>
; CHECK-NEXT: [[C1:%.*]] = icmp eq <2 x i8> [[X]], <i8 2, i8 2>
; CHECK-NEXT: [[C2:%.*]] = icmp ugt <2 x i8> [[SUB]], [[Y]]
@@ -207,8 +207,8 @@ define <2 x i1> @eq_vector_unequal1(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @ne_vector_unequal2(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @ne_vector_unequal2
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @ne_vector_unequal2(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i8> [[X]], <i8 7, i8 7>
; CHECK-NEXT: [[C1:%.*]] = icmp ne <2 x i8> [[X]], <i8 -3, i8 -3>
; CHECK-NEXT: [[C2:%.*]] = icmp ule <2 x i8> [[ADD]], [[Y]]
@@ -226,8 +226,8 @@ define <2 x i1> @ne_vector_unequal2(<2 x i8> %x, <2 x i8> %y) {
; Tests with poison
; ==============================================================================
define <2 x i1> @eq_vector_poison_icmp(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @eq_vector_poison_icmp
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @eq_vector_poison_icmp(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
@@ -240,8 +240,8 @@ define <2 x i1> @eq_vector_poison_icmp(<2 x i8> %x, <2 x i8> %y) {
}
define <2 x i1> @eq_vector_poison_add(<2 x i8> %x, <2 x i8> %y) {
-; CHECK-LABEL: define <2 x i1> @eq_vector_poison_add
-; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
+; CHECK-LABEL: define <2 x i1> @eq_vector_poison_add(
+; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
@@ -257,8 +257,8 @@ define <2 x i1> @eq_vector_poison_add(<2 x i8> %x, <2 x i8> %y) {
; Tests with values commuted
; ==============================================================================
define i1 @eq_commuted(i8 %x, i8 %py) {
-; CHECK-LABEL: define i1 @eq_commuted
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[PY:%.*]]) {
+; CHECK-LABEL: define i1 @eq_commuted(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[PY:%.*]]) {
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 43, [[PY]]
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -1
; CHECK-NEXT: [[OR:%.*]] = icmp uge i8 [[TMP1]], [[Y]]
@@ -272,10 +272,10 @@ define i1 @eq_commuted(i8 %x, i8 %py) {
}
define i1 @ne_commuted_equal_minus_1(i8 %x, i8 %py) {
-; CHECK-LABEL: define i1 @ne_commuted_equal_minus_1
-; CHECK-SAME: (i8 [[X:%.*]], i8 [[PY:%.*]]) {
+; CHECK-LABEL: define i1 @ne_commuted_equal_minus_1(
+; CHECK-SAME: i8 [[X:%.*]], i8 [[PY:%.*]]) {
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[PY]]
-; CHECK-NEXT: [[AND:%.*]] = icmp ugt i8 [[Y]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = icmp ult i8 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[AND]]
;
%y = sdiv i8 42, %py ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
index c20f48a985b3e..e73b98462327c 100644
--- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
@@ -1320,7 +1320,7 @@ define i1 @bitwise_and_bitwise_and_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = and i1 [[C1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1341,7 +1341,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = and i1 [[C1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1362,7 +1362,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = and i1 [[TMP3]], [[C1]]
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1383,7 +1383,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = and i1 [[TMP3]], [[C1]]
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1404,7 +1404,7 @@ define i1 @bitwise_and_logical_and_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[C1]], i1 [[TMP3]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1425,7 +1425,7 @@ define i1 @bitwise_and_logical_and_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[C1]], i1 [[TMP3]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1447,7 +1447,7 @@ define i1 @bitwise_and_logical_and_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i8 [[Z_SHIFT]]
; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], [[X:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[X:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i8 [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP4]], i1 [[C1]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1468,7 +1468,7 @@ define i1 @bitwise_and_logical_and_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C1]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1489,7 +1489,7 @@ define i1 @logical_and_bitwise_and_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C1]], [[C2]]
@@ -1512,7 +1512,7 @@ define i1 @logical_and_bitwise_and_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C1]], [[C2]]
@@ -1535,7 +1535,7 @@ define i1 @logical_and_bitwise_and_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C2]], [[C1]]
@@ -1558,7 +1558,7 @@ define i1 @logical_and_bitwise_and_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C2]], [[C1]]
@@ -1581,7 +1581,7 @@ define i1 @logical_and_logical_and_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = select i1 [[C1]], i1 [[C2]], i1 false
@@ -1604,7 +1604,7 @@ define i1 @logical_and_logical_and_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C3]], i1 [[C1]], i1 false
@@ -1627,7 +1627,7 @@ define i1 @logical_and_logical_and_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0
; CHECK-NEXT: [[AND1:%.*]] = select i1 [[C2]], i1 [[C1]], i1 false
@@ -1650,7 +1650,7 @@ define i1 @logical_and_logical_and_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C1]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -1671,7 +1671,7 @@ define i1 @bitwise_or_bitwise_or_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i1 [[C1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1692,7 +1692,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i1 [[C1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1713,7 +1713,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i1 [[TMP3]], [[C1]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1734,7 +1734,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i1 [[TMP3]], [[C1]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1755,7 +1755,7 @@ define i1 @bitwise_or_logical_or_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = select i1 [[C1]], i1 true, i1 [[TMP3]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1776,7 +1776,7 @@ define i1 @bitwise_or_logical_or_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = select i1 [[C1]], i1 true, i1 [[TMP3]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1798,7 +1798,7 @@ define i1 @bitwise_or_logical_or_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i8 [[Z_SHIFT]]
; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], [[X:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[X:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i8 [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP4]], i1 true, i1 [[C1]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1819,7 +1819,7 @@ define i1 @bitwise_or_logical_or_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP3]], i1 true, i1 [[C1]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -1840,7 +1840,7 @@ define i1 @logical_or_bitwise_or_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C1]], [[C2]]
@@ -1863,7 +1863,7 @@ define i1 @logical_or_bitwise_or_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C1]], [[C2]]
@@ -1886,7 +1886,7 @@ define i1 @logical_or_bitwise_or_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C2]], [[C1]]
@@ -1909,7 +1909,7 @@ define i1 @logical_or_bitwise_or_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C2]], [[C1]]
@@ -1932,7 +1932,7 @@ define i1 @logical_or_logical_or_icmps(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = select i1 [[C1]], i1 true, i1 [[C2]]
@@ -1955,7 +1955,7 @@ define i1 @logical_or_logical_or_icmps_comm1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C3]], i1 true, i1 [[C1]]
@@ -1978,7 +1978,7 @@ define i1 @logical_or_logical_or_icmps_comm2(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
-; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]]
+; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]]
; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0
; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0
; CHECK-NEXT: [[OR1:%.*]] = select i1 [[C2]], i1 true, i1 [[C1]]
@@ -2001,7 +2001,7 @@ define i1 @logical_or_logical_or_icmps_comm3(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP3]], i1 true, i1 [[C1]]
; CHECK-NEXT: ret i1 [[OR2]]
@@ -2052,7 +2052,7 @@ define i1 @bitwise_and_logical_and_masked_icmp_allzeros(i1 %c, i32 %x) {
define i1 @bitwise_and_logical_and_masked_icmp_allzeros_poison1(i1 %c, i32 %x, i32 %y) {
; CHECK-LABEL: @bitwise_and_logical_and_masked_icmp_allzeros_poison1(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[Y:%.*]], 7
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C:%.*]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -2104,7 +2104,7 @@ define i1 @bitwise_and_logical_and_masked_icmp_allones(i1 %c, i32 %x) {
define i1 @bitwise_and_logical_and_masked_icmp_allones_poison1(i1 %c, i32 %x, i32 %y) {
; CHECK-LABEL: @bitwise_and_logical_and_masked_icmp_allones_poison1(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[Y:%.*]], 7
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C:%.*]], i1 false
; CHECK-NEXT: ret i1 [[AND2]]
@@ -3057,9 +3057,9 @@ define i32 @icmp_slt_0_or_icmp_add_1_sge_100_i32_fail(i32 %x) {
define i1 @logical_and_icmps1(i32 %a, i1 %other_cond) {
; CHECK-LABEL: @logical_and_icmps1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[A:%.*]], 10086
-; CHECK-NEXT: [[RET2:%.*]] = select i1 [[RET1:%.*]], i1 [[CMP3]], i1 false
-; CHECK-NEXT: ret i1 [[RET2]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[A:%.*]], 10086
+; CHECK-NEXT: [[RET:%.*]] = select i1 [[OTHER_COND:%.*]], i1 [[TMP0]], i1 false
+; CHECK-NEXT: ret i1 [[RET]]
;
entry:
%cmp1 = icmp sgt i32 %a, -1
@@ -3085,9 +3085,9 @@ entry:
define <4 x i1> @logical_and_icmps_vec1(<4 x i32> %a, <4 x i1> %other_cond) {
; CHECK-LABEL: @logical_and_icmps_vec1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ult <4 x i32> [[A:%.*]], <i32 10086, i32 10086, i32 10086, i32 10086>
-; CHECK-NEXT: [[RET2:%.*]] = select <4 x i1> [[RET1:%.*]], <4 x i1> [[CMP3]], <4 x i1> zeroinitializer
-; CHECK-NEXT: ret <4 x i1> [[RET2]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult <4 x i32> [[A:%.*]], <i32 10086, i32 10086, i32 10086, i32 10086>
+; CHECK-NEXT: [[RET:%.*]] = select <4 x i1> [[OTHER_COND:%.*]], <4 x i1> [[TMP0]], <4 x i1> zeroinitializer
+; CHECK-NEXT: ret <4 x i1> [[RET]]
;
entry:
%cmp1 = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1 >
diff --git a/llvm/test/Transforms/InstCombine/and-or-not.ll b/llvm/test/Transforms/InstCombine/and-or-not.ll
index 2e351c30ea1f7..5e6c480df5d10 100644
--- a/llvm/test/Transforms/InstCombine/and-or-not.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-not.ll
@@ -506,8 +506,8 @@ define i64 @PR32830(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @PR32830(
; CHECK-NEXT: [[NOTA:%.*]] = xor i64 [[A:%.*]], -1
; CHECK-NEXT: [[NOTB:%.*]] = xor i64 [[B:%.*]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i64 [[NOTB]], [[A]]
-; CHECK-NEXT: [[OR2:%.*]] = or i64 [[NOTA]], [[C:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i64 [[A]], [[NOTB]]
+; CHECK-NEXT: [[OR2:%.*]] = or i64 [[C:%.*]], [[NOTA]]
; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR1]], [[OR2]]
; CHECK-NEXT: ret i64 [[AND]]
;
@@ -813,7 +813,7 @@ define i4 @reduce_xor_common_op_commute1(i4 %x, i4 %y, i4 %z) {
define i4 @annihilate_xor_common_op_commute2(i4 %x, i4 %y, i4 %p, i4 %q) {
; CHECK-LABEL: @annihilate_xor_common_op_commute2(
; CHECK-NEXT: [[Z:%.*]] = mul i4 [[P:%.*]], [[P]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i4 [[TMP1]], [[Q:%.*]]
; CHECK-NEXT: ret i4 [[TMP2]]
;
@@ -828,8 +828,8 @@ define i4 @annihilate_xor_common_op_commute2(i4 %x, i4 %y, i4 %p, i4 %q) {
define <2 x i4> @reduce_xor_common_op_commute3(<2 x i4> %x, <2 x i4> %y, <2 x i4> %p) {
; CHECK-LABEL: @reduce_xor_common_op_commute3(
; CHECK-NEXT: [[Z:%.*]] = mul <2 x i4> [[P:%.*]], [[P]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[Z]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[Y:%.*]], [[Z]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i4> [[R]]
;
%z = mul <2 x i4> %p, %p ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/and-or.ll b/llvm/test/Transforms/InstCombine/and-or.ll
index b4ef27607121d..fee055a2e1245 100644
--- a/llvm/test/Transforms/InstCombine/and-or.ll
+++ b/llvm/test/Transforms/InstCombine/and-or.ll
@@ -385,7 +385,7 @@ define i8 @or_or_and_noOneUse(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-NEXT: call void @use(i8 [[AND1]])
; CHECK-NEXT: [[AND2:%.*]] = and i8 [[A]], [[D:%.*]]
; CHECK-NEXT: call void @use(i8 [[AND2]])
-; CHECK-NEXT: [[OR1:%.*]] = or i8 [[AND2]], [[C:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i8 [[C:%.*]], [[AND2]]
; CHECK-NEXT: call void @use(i8 [[OR1]])
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[OR1]], [[AND1]]
; CHECK-NEXT: ret i8 [[OR2]]
@@ -405,7 +405,7 @@ define i8 @or_or_and_pat1(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_or_and_pat1(
; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -439,7 +439,7 @@ define i8 @or_or_and_pat3(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_or_and_pat3(
; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -472,7 +472,7 @@ define i8 @or_or_and_pat4(i8 %a, i8 %b, i8 %c, i8 %d) {
define i8 @or_or_and_pat5(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_or_and_pat5(
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -502,7 +502,7 @@ define i8 @or_or_and_pat6(i8 %a, i8 %b, i8 %c, i8 %d) {
define i8 @or_or_and_pat7(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_or_and_pat7(
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -535,7 +535,7 @@ define i8 @or_and_or_noOneUse(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-NEXT: call void @use(i8 [[AND1]])
; CHECK-NEXT: [[AND2:%.*]] = and i8 [[A]], [[D:%.*]]
; CHECK-NEXT: call void @use(i8 [[AND2]])
-; CHECK-NEXT: [[OR1:%.*]] = or i8 [[AND2]], [[C:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i8 [[C:%.*]], [[AND2]]
; CHECK-NEXT: call void @use(i8 [[OR1]])
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[AND1]], [[OR1]]
; CHECK-NEXT: ret i8 [[OR2]]
@@ -555,7 +555,7 @@ define i8 @or_and_or_pat1(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_and_or_pat1(
; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -589,7 +589,7 @@ define i8 @or_and_or_pat3(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_and_or_pat3(
; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -622,7 +622,7 @@ define i8 @or_and_or_pat4(i8 %a, i8 %b, i8 %c, i8 %d) {
define i8 @or_and_or_pat5(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_and_or_pat5(
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -652,7 +652,7 @@ define i8 @or_and_or_pat6(i8 %a, i8 %b, i8 %c, i8 %d) {
define i8 @or_and_or_pat7(i8 %a, i8 %b, i8 %c, i8 %d) {
; CHECK-LABEL: @or_and_or_pat7(
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i8 [[OR2]]
;
@@ -687,8 +687,8 @@ define i32 @or_or_and_noOneUse_fail1(i32 %a, i32 %b) {
; CHECK-NEXT: call void @use2(i32 [[AND]])
; CHECK-NEXT: [[AND1:%.*]] = or i32 [[B:%.*]], 157
; CHECK-NEXT: [[OR:%.*]] = and i32 [[SHR]], [[AND1]]
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[B]], 23
-; CHECK-NEXT: [[AND9:%.*]] = and i32 [[TMP1]], 157
+; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[B]], 23
+; CHECK-NEXT: [[AND9:%.*]] = and i32 [[SHR8]], 157
; CHECK-NEXT: [[R:%.*]] = or i32 [[OR]], [[AND9]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -714,7 +714,7 @@ define { i1, i1, i1, i1, i1 } @or_or_and_noOneUse_fail2(i1 %a_0, i1 %a_1, i1 %a_
; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[A_1:%.*]], [[B_1:%.*]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP3]], true
; CHECK-NEXT: [[TMP5:%.*]] = and i1 [[TMP0]], [[A_1]]
-; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP2]], [[A_1]]
+; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[A_1]], [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = and i1 [[TMP6]], [[B_1]]
; CHECK-NEXT: [[D:%.*]] = or i1 [[TMP7]], [[TMP5]]
; CHECK-NEXT: [[DOTNOT1:%.*]] = or i1 [[TMP1]], [[TMP3]]
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index e6df4e32bae36..cf1285cbc11a4 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -5,7 +5,7 @@
define i32 @test1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[T61:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[T7:%.*]] = and i32 [[T61]], [[Z:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = and i32 [[Z:%.*]], [[T61]]
; CHECK-NEXT: ret i32 [[T7]]
;
%t3 = and i32 %z, %x
@@ -43,8 +43,8 @@ define i32 @PR38781(i32 %a, i32 %b) {
; (a ^ 4) & (a ^ ~4) -> 0
define i32 @PR75692_1(i32 %x) {
-; CHECK-LABEL: @PR75692_1
-; CHECK-NEXT: ret i32 0
+; CHECK-LABEL: @PR75692_1(
+; CHECK-NEXT: ret i32 0
;
%t2 = xor i32 %x, 4
%t3 = xor i32 %x, -5
@@ -54,11 +54,11 @@ define i32 @PR75692_1(i32 %x) {
; (a ^ 4) & (a ^ 3) is not zero
define i32 @PR75692_2(i32 %x) {
-; CHECK-LABEL: @PR75692_2
-; CHECK-NEXT: %t2 = xor i32 %x, 4
-; CHECK-NEXT: %t3 = xor i32 %x, -4
-; CHECK-NEXT: %t4 = and i32 %t2, %t3
-; CHECK-NEXT: ret i32 %t4
+; CHECK-LABEL: @PR75692_2(
+; CHECK-NEXT: [[T2:%.*]] = xor i32 [[X:%.*]], 4
+; CHECK-NEXT: [[T3:%.*]] = xor i32 [[X]], -4
+; CHECK-NEXT: [[T4:%.*]] = and i32 [[T2]], [[T3]]
+; CHECK-NEXT: ret i32 [[T4]]
;
%t2 = xor i32 %x, 4
%t3 = xor i32 %x, -4
@@ -68,11 +68,11 @@ define i32 @PR75692_2(i32 %x) {
; (a ^ 4) & (b ^ ~4) is not zero, since a != b is possible
define i32 @PR75692_3(i32 %x, i32 %y) {
-; CHECK-LABEL: @PR75692_3
-; CHECK-NEXT: %t2 = xor i32 %x, 4
-; CHECK-NEXT: %t3 = xor i32 %y, -5
-; CHECK-NEXT: %t4 = and i32 %t2, %t3
-; CHECK-NEXT: ret i32 %t4
+; CHECK-LABEL: @PR75692_3(
+; CHECK-NEXT: [[T2:%.*]] = xor i32 [[X:%.*]], 4
+; CHECK-NEXT: [[T3:%.*]] = xor i32 [[Y:%.*]], -5
+; CHECK-NEXT: [[T4:%.*]] = and i32 [[T2]], [[T3]]
+; CHECK-NEXT: ret i32 [[T4]]
;
%t2 = xor i32 %x, 4
%t3 = xor i32 %y, -5
diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll
index b26d6e16c2db2..3dbf9af7e1934 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-or.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll
@@ -339,8 +339,8 @@ define i64 @and_xor_or_negative(i64 %x, i64 %y, i64 %z, i64 %w) {
; CHECK-LABEL: define {{[^@]+}}@and_xor_or_negative
; CHECK-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]], i64 [[W:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[Y]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], [[Z]]
-; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], [[W]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[Z]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[W]], [[TMP2]]
; CHECK-NEXT: ret i64 [[TMP3]]
;
%1 = and i64 %y, %x
@@ -585,7 +585,7 @@ define i64 @sext_or_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]]
; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]]
; CHECK-NEXT: ret i64 [[OR2]]
;
@@ -601,7 +601,7 @@ define i64 @zext_or_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]]
; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]]
; CHECK-NEXT: ret i64 [[OR2]]
;
@@ -617,7 +617,7 @@ define i64 @sext_and_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[CONV]]
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[AND]], [[CONV2]]
; CHECK-NEXT: ret i64 [[AND2]]
;
@@ -633,7 +633,7 @@ define i64 @zext_and_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[CONV]]
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[AND]], [[CONV2]]
; CHECK-NEXT: ret i64 [[AND2]]
;
@@ -649,7 +649,7 @@ define i64 @sext_xor_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[A]], [[CONV]]
; CHECK-NEXT: [[XOR2:%.*]] = xor i64 [[XOR]], [[CONV2]]
; CHECK-NEXT: ret i64 [[XOR2]]
;
@@ -665,7 +665,7 @@ define i64 @zext_xor_chain(i64 %a, i16 %b, i16 %c) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64
-; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[A]], [[CONV]]
; CHECK-NEXT: [[XOR2:%.*]] = xor i64 [[XOR]], [[CONV2]]
; CHECK-NEXT: ret i64 [[XOR2]]
;
@@ -682,7 +682,7 @@ define i64 @sext_or_chain_two_uses1(i64 %a, i16 %b, i16 %c, i64 %d) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]], i64 [[D:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]]
; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]]
; CHECK-NEXT: [[USE:%.*]] = udiv i64 [[OR]], [[D]]
; CHECK-NEXT: [[RETVAL:%.*]] = udiv i64 [[OR2]], [[USE]]
@@ -702,7 +702,7 @@ define i64 @sext_or_chain_two_uses2(i64 %a, i16 %b, i16 %c, i64 %d) {
; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]], i64 [[D:%.*]]) {
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]]
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]]
; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]]
; CHECK-NEXT: [[USE1:%.*]] = udiv i64 [[OR2]], [[D]]
; CHECK-NEXT: [[USE2:%.*]] = udiv i64 [[OR2]], [[USE1]]
@@ -761,7 +761,7 @@ define i32 @not_and_and_not_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP2]], [[A]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[TMP2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
%not1 = xor i32 %b, -1
@@ -856,7 +856,7 @@ define i32 @not_or_or_not_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP2]], [[A]]
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[TMP2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
%not1 = xor i32 %b, -1
@@ -952,7 +952,7 @@ define i32 @or_not_and_commute2(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@or_not_and_commute2
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i32 [[OR3]]
@@ -990,7 +990,7 @@ define i32 @or_not_and_commute4(i32 %a, i32 %b, i32 %c0) {
; CHECK-LABEL: define {{[^@]+}}@or_not_and_commute4
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) {
; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1011,7 +1011,7 @@ define i32 @or_not_and_commute5(i32 %a0, i32 %b, i32 %c0) {
; CHECK-SAME: (i32 [[A0:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = sdiv i32 42, [[A0]]
; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1137,10 +1137,10 @@ define i32 @or_not_and_extra_not_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1161,7 +1161,7 @@ define i32 @or_not_and_extra_and_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]]
@@ -1184,10 +1184,10 @@ define i32 @or_not_and_extra_and_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]]
; CHECK-NEXT: call void @use(i32 [[AND2]])
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1250,10 +1250,10 @@ define i32 @or_not_and_wrong_c(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[D]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]]
; CHECK-NEXT: ret i32 [[OR3]]
;
@@ -1272,10 +1272,10 @@ define i32 @or_not_and_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[D]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[D]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]]
; CHECK-NEXT: ret i32 [[OR3]]
;
@@ -1333,7 +1333,7 @@ define i32 @and_not_or_commute2(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@and_not_or_commute2
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[AND3]]
@@ -1371,7 +1371,7 @@ define i32 @and_not_or_commute4(i32 %a, i32 %b, i32 %c0) {
; CHECK-LABEL: define {{[^@]+}}@and_not_or_commute4
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) {
; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[AND3]]
@@ -1392,7 +1392,7 @@ define i32 @and_not_or_commute5(i32 %a0, i32 %b, i32 %c0) {
; CHECK-SAME: (i32 [[A0:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = sdiv i32 42, [[A0]]
; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[AND3]]
@@ -1518,10 +1518,10 @@ define i32 @and_not_or_extra_not_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[AND3]]
@@ -1542,7 +1542,7 @@ define i32 @and_not_or_extra_and_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
@@ -1565,10 +1565,10 @@ define i32 @and_not_or_extra_and_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[OR2]])
; CHECK-NEXT: ret i32 [[AND3]]
@@ -1631,10 +1631,10 @@ define i32 @and_not_or_wrong_c(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[D]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]]
; CHECK-NEXT: ret i32 [[AND3]]
;
@@ -1653,10 +1653,10 @@ define i32 @and_not_or_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[D]]
+; CHECK-NEXT: [[OR2:%.*]] = or i32 [[D]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]]
; CHECK-NEXT: ret i32 [[AND3]]
;
@@ -1693,7 +1693,7 @@ define i32 @or_and_not_not_commute1(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@or_and_not_not_commute1
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1780,7 +1780,7 @@ define i32 @or_and_not_not_commute6(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@or_and_not_not_commute6
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1819,7 +1819,7 @@ define i32 @or_and_not_not_extra_not_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1860,7 +1860,7 @@ define i32 @or_and_not_not_extra_and_use(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1
@@ -1884,7 +1884,7 @@ define i32 @or_and_not_not_extra_or_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[OR1]])
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1929,7 +1929,7 @@ define i32 @or_and_not_not_2_extra_uses(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[AND]])
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]]
; CHECK-NEXT: ret i32 [[OR3]]
@@ -1952,7 +1952,7 @@ define i32 @or_and_not_not_wrong_a(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]]
; CHECK-NEXT: ret i32 [[OR3]]
;
@@ -1972,7 +1972,7 @@ define i32 @or_and_not_not_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]]
; CHECK-NEXT: ret i32 [[OR3]]
;
@@ -2008,7 +2008,7 @@ define i32 @and_or_not_not_commute1(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@and_or_not_not_commute1
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[AND3]]
@@ -2095,7 +2095,7 @@ define i32 @and_or_not_not_commute6(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@and_or_not_not_commute6
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[AND3]]
@@ -2134,7 +2134,7 @@ define i32 @and_or_not_not_extra_not_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
; CHECK-NEXT: ret i32 [[AND3]]
@@ -2175,7 +2175,7 @@ define i32 @and_or_not_not_extra_and_use(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1
@@ -2198,7 +2198,7 @@ define i32 @and_or_not_not_extra_or_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]]
; CHECK-NEXT: call void @use(i32 [[AND1]])
; CHECK-NEXT: ret i32 [[AND3]]
@@ -2240,7 +2240,7 @@ define i32 @and_or_not_not_2_extra_uses(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: call void @use(i32 [[AND1]])
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[OR]])
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]]
; CHECK-NEXT: ret i32 [[AND3]]
@@ -2262,7 +2262,7 @@ define i32 @and_or_not_not_wrong_a(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[D]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]]
; CHECK-NEXT: ret i32 [[AND3]]
;
@@ -2282,7 +2282,7 @@ define i32 @and_or_not_not_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR]], [[NOT1]]
; CHECK-NEXT: ret i32 [[AND3]]
;
@@ -2471,7 +2471,7 @@ define i32 @and_not_or_or_not_or_xor_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
@@ -2539,7 +2539,7 @@ define i32 @and_not_or_or_not_or_xor_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1
@@ -2567,7 +2567,7 @@ define i32 @or_not_and_and_not_and_xor(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2588,7 +2588,7 @@ define i32 @or_not_and_and_not_and_xor_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[B]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2632,7 +2632,7 @@ define i32 @or_not_and_and_not_and_xor_commute3(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2676,7 +2676,7 @@ define i32 @or_not_and_and_not_and_xor_commute5(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2697,7 +2697,7 @@ define i32 @or_not_and_and_not_and_xor_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2720,7 +2720,7 @@ define i32 @or_not_and_and_not_and_xor_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2743,7 +2743,7 @@ define i32 @or_not_and_and_not_and_xor_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2766,7 +2766,7 @@ define i32 @or_not_and_and_not_and_xor_use4(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2789,7 +2789,7 @@ define i32 @or_not_and_and_not_and_xor_use5(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]]
@@ -2812,7 +2812,7 @@ define i32 @or_not_and_and_not_and_xor_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1
@@ -2965,7 +2965,7 @@ define i32 @not_and_and_or_not_or_or_commute3(i32 %a, i32 %b0, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_not_or_or_commute3
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1
; CHECK-NEXT: ret i32 [[OR3]]
@@ -3051,7 +3051,7 @@ define i32 @not_and_and_or_not_or_or_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR2]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND2]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
@@ -3093,7 +3093,7 @@ define i32 @not_and_and_or_not_or_or_use5(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_not_or_or_use5
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]]
; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1
@@ -3118,7 +3118,7 @@ define i32 @not_and_and_or_not_or_or_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR2]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]]
; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND2]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[AND2]])
@@ -3270,7 +3270,7 @@ define i32 @not_or_or_and_not_and_and_commute3(i32 %a, i32 %b0, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[AND3:%.*]] = or i32 [[TMP1]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND3]]
;
@@ -3355,7 +3355,7 @@ define i32 @not_or_or_and_not_and_and_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND2]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
@@ -3396,7 +3396,7 @@ define i32 @not_or_or_and_not_and_and_use5(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@not_or_or_and_not_and_and_use5
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]]
; CHECK-NEXT: [[AND3:%.*]] = or i32 [[TMP1]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[OR1]])
@@ -3419,7 +3419,7 @@ define i32 @not_or_or_and_not_and_and_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]]
; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[OR2]])
@@ -3443,7 +3443,7 @@ define i32 @not_and_and_or_no_or(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -3461,7 +3461,7 @@ define i32 @not_and_and_or_no_or_commute1_and(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -3479,7 +3479,7 @@ define i32 @not_and_and_or_no_or_commute2_and(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -3497,7 +3497,7 @@ define i32 @not_and_and_or_no_or_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -3516,7 +3516,7 @@ define i32 @not_and_and_or_no_or_commute2(i32 %a, i32 %b0, i32 %c) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -3555,7 +3555,7 @@ define i32 @not_and_and_or_no_or_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[OR2]]
@@ -3575,7 +3575,7 @@ define i32 @not_and_and_or_no_or_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[OR2]]
@@ -3595,7 +3595,7 @@ define i32 @not_and_and_or_no_or_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[OR2]]
@@ -3615,7 +3615,7 @@ define i32 @not_and_and_or_no_or_use4(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[OR2]]
@@ -3636,7 +3636,7 @@ define i32 @not_and_and_or_no_or_use5(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[OR1]])
@@ -3658,7 +3658,7 @@ define i32 @not_and_and_or_no_or_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
@@ -3678,9 +3678,9 @@ define i32 @not_and_and_or_no_or_use7(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_no_or_use7
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[AND1]])
; CHECK-NEXT: ret i32 [[OR2]]
@@ -3701,7 +3701,7 @@ define i32 @not_and_and_or_no_or_use8(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]]
; CHECK-NEXT: call void @use(i32 [[AND2]])
@@ -3724,7 +3724,7 @@ define i32 @not_or_or_and_no_and(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
@@ -3742,7 +3742,7 @@ define i32 @not_or_or_and_no_and_commute1_or(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
@@ -3760,7 +3760,7 @@ define i32 @not_or_or_and_no_and_commute2_or(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
@@ -3778,7 +3778,7 @@ define i32 @not_or_or_and_no_and_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
@@ -3797,7 +3797,7 @@ define i32 @not_or_or_and_no_and_commute2(i32 %a, i32 %b0, i32 %c) {
; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: ret i32 [[AND2]]
;
@@ -3836,7 +3836,7 @@ define i32 @not_or_or_and_no_and_use1(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[AND2]]
@@ -3856,7 +3856,7 @@ define i32 @not_or_or_and_no_and_use2(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[AND2]]
@@ -3876,7 +3876,7 @@ define i32 @not_or_or_and_no_and_use3(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[AND2]]
@@ -3896,7 +3896,7 @@ define i32 @not_or_or_and_no_and_use4(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[NOT2]])
; CHECK-NEXT: ret i32 [[AND2]]
@@ -3916,7 +3916,7 @@ define i32 @not_or_or_and_no_and_use5(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[AND1]])
@@ -3938,7 +3938,7 @@ define i32 @not_or_or_and_no_and_use6(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[NOT1]])
@@ -3958,9 +3958,9 @@ define i32 @not_or_or_and_no_and_use7(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: define {{[^@]+}}@not_or_or_and_no_and_use7
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]]
; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]]
; CHECK-NEXT: call void @use(i32 [[OR1]])
; CHECK-NEXT: ret i32 [[AND2]]
@@ -3980,7 +3980,7 @@ define i32 @not_or_or_and_no_and_use8(i32 %a, i32 %b, i32 %c) {
; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) {
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]]
; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]]
; CHECK-NEXT: call void @use(i32 [[OR2]])
@@ -4000,7 +4000,7 @@ define i4 @and_orn_xor(i4 %a, i4 %b) {
; CHECK-LABEL: define {{[^@]+}}@and_orn_xor
; CHECK-SAME: (i4 [[A:%.*]], i4 [[B:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[A]], -1
-; CHECK-NEXT: [[R:%.*]] = and i4 [[TMP1]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = and i4 [[B]], [[TMP1]]
; CHECK-NEXT: ret i4 [[R]]
;
%xor = xor i4 %a, %b
@@ -4014,7 +4014,7 @@ define <2 x i4> @and_orn_xor_commute1(<2 x i4> %a, <2 x i4> %b) {
; CHECK-LABEL: define {{[^@]+}}@and_orn_xor_commute1
; CHECK-SAME: (<2 x i4> [[A:%.*]], <2 x i4> [[B:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[A]], <i4 -1, i4 -1>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i4> [[TMP1]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i4> [[B]], [[TMP1]]
; CHECK-NEXT: ret <2 x i4> [[R]]
;
%xor = xor <2 x i4> %a, %b
@@ -4030,7 +4030,7 @@ define i32 @and_orn_xor_commute2(i32 %a, i32 %b) {
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]]
; CHECK-NEXT: call void @use(i32 [[XOR]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[B]], [[TMP1]]
; CHECK-NEXT: ret i32 [[R]]
;
%xor = xor i32 %b, %a
@@ -4047,7 +4047,7 @@ define i32 @and_orn_xor_commute3(i32 %a, i32 %b) {
; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A]], -1
; CHECK-NEXT: call void @use(i32 [[NOTA]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[B]], [[TMP1]]
; CHECK-NEXT: ret i32 [[R]]
;
%xor = xor i32 %b, %a
@@ -4207,7 +4207,7 @@ define i16 @and_zext_zext(i8 %x, i4 %y) {
; CHECK-LABEL: define {{[^@]+}}@and_zext_zext
; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = zext nneg i8 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
@@ -4221,7 +4221,7 @@ define i16 @or_zext_zext(i8 %x, i4 %y) {
; CHECK-LABEL: define {{[^@]+}}@or_zext_zext
; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = zext i8 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
@@ -4235,7 +4235,7 @@ define <2 x i16> @xor_zext_zext(<2 x i8> %x, <2 x i4> %y) {
; CHECK-LABEL: define {{[^@]+}}@xor_zext_zext
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i4> [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i4> [[Y]] to <2 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = zext <2 x i8> [[TMP2]] to <2 x i16>
; CHECK-NEXT: ret <2 x i16> [[R]]
;
@@ -4249,7 +4249,7 @@ define i16 @and_sext_sext(i8 %x, i4 %y) {
; CHECK-LABEL: define {{[^@]+}}@and_sext_sext
; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
@@ -4263,7 +4263,7 @@ define i16 @or_sext_sext(i8 %x, i4 %y) {
; CHECK-LABEL: define {{[^@]+}}@or_sext_sext
; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
@@ -4277,7 +4277,7 @@ define i16 @xor_sext_sext(i8 %x, i4 %y) {
; CHECK-LABEL: define {{[^@]+}}@xor_sext_sext
; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16
; CHECK-NEXT: ret i16 [[R]]
;
@@ -4801,7 +4801,7 @@ define i1 @test_and_xor_freely_invertable_multiuse(i32 %x, i32 %y, i1 %z) {
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use_i1(i1 [[CMP]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[CMP]], true
-; CHECK-NEXT: [[AND:%.*]] = and i1 [[TMP1]], [[Z]]
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[Z]], [[TMP1]]
; CHECK-NEXT: ret i1 [[AND]]
;
%cmp = icmp sgt i32 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index b5250fc1a7849..466718c802300 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -831,7 +831,7 @@ define i64 @test39(i32 %X) {
define i32 @lowmask_add_zext(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext(
; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT: [[BO_NARROW:%.*]] = add i8 [[Y_TR]], [[X:%.*]]
+; CHECK-NEXT: [[BO_NARROW:%.*]] = add i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -845,7 +845,7 @@ define i32 @lowmask_add_zext_commute(i16 %x, i32 %p) {
; CHECK-LABEL: @lowmask_add_zext_commute(
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT: [[BO_NARROW:%.*]] = add i16 [[Y_TR]], [[X:%.*]]
+; CHECK-NEXT: [[BO_NARROW:%.*]] = add i16 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT: [[R:%.*]] = zext i16 [[BO_NARROW]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -861,7 +861,7 @@ define i32 @lowmask_add_zext_commute(i16 %x, i32 %p) {
define i32 @lowmask_add_zext_wrong_mask(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_wrong_mask(
; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 511
; CHECK-NEXT: ret i32 [[R]]
;
@@ -877,7 +877,7 @@ define i32 @lowmask_add_zext_use1(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_use1(
; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[ZX]])
-; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 255
; CHECK-NEXT: ret i32 [[R]]
;
@@ -893,7 +893,7 @@ define i32 @lowmask_add_zext_use1(i8 %x, i32 %y) {
define i32 @lowmask_add_zext_use2(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_add_zext_use2(
; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: call void @use32(i32 [[BO]])
; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 255
; CHECK-NEXT: ret i32 [[R]]
@@ -938,7 +938,7 @@ define i17 @lowmask_sub_zext_commute(i5 %x, i17 %y) {
define i32 @lowmask_mul_zext(i8 %x, i32 %y) {
; CHECK-LABEL: @lowmask_mul_zext(
; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8
-; CHECK-NEXT: [[BO_NARROW:%.*]] = mul i8 [[Y_TR]], [[X:%.*]]
+; CHECK-NEXT: [[BO_NARROW:%.*]] = mul i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -952,7 +952,7 @@ define i32 @lowmask_xor_zext_commute(i8 %x, i32 %p) {
; CHECK-LABEL: @lowmask_xor_zext_commute(
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y]] to i8
-; CHECK-NEXT: [[BO_NARROW:%.*]] = xor i8 [[Y_TR]], [[X:%.*]]
+; CHECK-NEXT: [[BO_NARROW:%.*]] = xor i8 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -966,7 +966,7 @@ define i32 @lowmask_xor_zext_commute(i8 %x, i32 %p) {
define i24 @lowmask_or_zext_commute(i16 %x, i24 %y) {
; CHECK-LABEL: @lowmask_or_zext_commute(
; CHECK-NEXT: [[Y_TR:%.*]] = trunc i24 [[Y:%.*]] to i16
-; CHECK-NEXT: [[BO_NARROW:%.*]] = or i16 [[Y_TR]], [[X:%.*]]
+; CHECK-NEXT: [[BO_NARROW:%.*]] = or i16 [[X:%.*]], [[Y_TR]]
; CHECK-NEXT: [[R:%.*]] = zext i16 [[BO_NARROW]] to i24
; CHECK-NEXT: ret i24 [[R]]
;
@@ -1127,7 +1127,7 @@ define i32 @test45(i32 %x, i32 %y) nounwind {
; y & (~y | x) -> y | x
define i32 @test46(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test46(
-; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[A]]
;
%n = xor i32 %y, -1
@@ -1139,7 +1139,7 @@ define i32 @test46(i32 %x, i32 %y) nounwind {
; y & (x | ~y) -> y | x
define i32 @test47(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @test47(
-; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[A]]
;
%n = xor i32 %y, -1
@@ -1814,7 +1814,7 @@ define i16 @signbit_splat_mask_use2(i8 %x, i16 %y) {
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT: [[S:%.*]] = sext i8 [[A]] to i16
; CHECK-NEXT: call void @use16(i16 [[S]])
-; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -1830,7 +1830,7 @@ define i16 @not_signbit_splat_mask1(i8 %x, i16 %y) {
; CHECK-LABEL: @not_signbit_splat_mask1(
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT: [[Z:%.*]] = zext i8 [[A]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[Z]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -1845,7 +1845,7 @@ define i16 @not_signbit_splat_mask2(i8 %x, i16 %y) {
; CHECK-LABEL: @not_signbit_splat_mask2(
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[S:%.*]] = sext i8 [[A]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 6
@@ -1920,7 +1920,7 @@ define i8 @not_ashr_not_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_not_bitwidth_mask(
; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SIGN]], -1
-; CHECK-NEXT: [[R:%.*]] = and i8 [[NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]]
; CHECK-NEXT: ret i8 [[R]]
;
%sign = ashr i8 %x, 6
@@ -1935,7 +1935,7 @@ define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_lshr_bitwidth_mask(
; CHECK-NEXT: [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SIGN]], -1
-; CHECK-NEXT: [[R:%.*]] = and i8 [[NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]]
; CHECK-NEXT: ret i8 [[R]]
;
%sign = lshr i8 %x, 7
@@ -2029,7 +2029,7 @@ define i16 @not_invert_signbit_splat_mask1(i8 %x, i16 %y) {
; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT: [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT: [[Z:%.*]] = zext i8 [[N]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[Z]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -2046,7 +2046,7 @@ define i16 @not_invert_signbit_splat_mask2(i8 %x, i16 %y) {
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT: [[N:%.*]] = xor i8 [[A]], -1
; CHECK-NEXT: [[S:%.*]] = sext i8 [[N]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 6
@@ -2504,7 +2504,7 @@ define i8 @negate_lowbitmask_use2(i8 %x, i8 %y) {
; CHECK-NEXT: [[A:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[N:%.*]] = sub nsw i8 0, [[A]]
; CHECK-NEXT: call void @use8(i8 [[N]])
-; CHECK-NEXT: [[R:%.*]] = and i8 [[N]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[N]]
; CHECK-NEXT: ret i8 [[R]]
;
%a = and i8 %x, 1
@@ -2553,7 +2553,7 @@ define i32 @and_zext_multiuse(i32 %a, i1 %b) {
; CHECK-LABEL: @and_zext_multiuse(
; CHECK-NEXT: [[MASK:%.*]] = zext i1 [[B:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[MASK]])
-; CHECK-NEXT: [[R:%.*]] = and i32 [[MASK]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[A:%.*]], [[MASK]]
; CHECK-NEXT: ret i32 [[R]]
;
%mask = zext i1 %b to i32
@@ -2636,7 +2636,7 @@ define i32 @and_zext_eq_zero(i32 %A, i32 %C) {
define i32 @canonicalize_and_add_power2_or_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X2]], -1
@@ -2656,7 +2656,7 @@ define i32 @canonicalize_and_add_power2_or_zero(i32 %x, i32 %y) {
define i32 @canonicalize_and_sub_power2_or_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
@@ -2674,7 +2674,7 @@ define i32 @canonicalize_and_sub_power2_or_zero(i32 %x, i32 %y) {
define i32 @canonicalize_and_add_power2_or_zero_commuted1(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted1(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
@@ -2692,7 +2692,7 @@ define i32 @canonicalize_and_add_power2_or_zero_commuted1(i32 %x, i32 %y) {
define i32 @canonicalize_and_add_power2_or_zero_commuted2(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted2(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X2]], -1
@@ -2712,7 +2712,7 @@ define i32 @canonicalize_and_add_power2_or_zero_commuted2(i32 %x, i32 %y) {
define i32 @canonicalize_and_add_power2_or_zero_commuted3(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted3(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]]
@@ -2730,7 +2730,7 @@ define i32 @canonicalize_and_add_power2_or_zero_commuted3(i32 %x, i32 %y) {
define i32 @canonicalize_and_sub_power2_or_zero_commuted_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_commuted_nofold(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[VAL:%.*]] = sub i32 [[P2]], [[X:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[VAL]], [[P2]]
@@ -2759,7 +2759,7 @@ define i32 @canonicalize_and_add_non_power2_or_zero_nofold(i32 %x, i32 %y) {
define i32 @canonicalize_and_add_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_multiuse_nofold(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]]
; CHECK-NEXT: [[VAL:%.*]] = add i32 [[X2]], [[P2]]
@@ -2781,7 +2781,7 @@ define i32 @canonicalize_and_add_power2_or_zero_multiuse_nofold(i32 %x, i32 %y)
define i32 @canonicalize_and_sub_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) {
; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_multiuse_nofold(
; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]]
+; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]]
; CHECK-NEXT: call void @use32(i32 [[P2]])
; CHECK-NEXT: [[VAL:%.*]] = sub i32 [[X:%.*]], [[P2]]
; CHECK-NEXT: call void @use32(i32 [[VAL]])
diff --git a/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll b/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll
index 9810e5057d8a9..eca38586d01d0 100644
--- a/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll
@@ -8,7 +8,7 @@
define i57 @test1(i57 %x, i57 %y, i57 %z) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[TMP61:%.*]] = xor i57 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP7:%.*]] = and i57 [[TMP61]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP7:%.*]] = and i57 [[Z:%.*]], [[TMP61]]
; CHECK-NEXT: ret i57 [[TMP7]]
;
%tmp3 = and i57 %z, %x
diff --git a/llvm/test/Transforms/InstCombine/apint-or.ll b/llvm/test/Transforms/InstCombine/apint-or.ll
index 38bffdf35a364..07a0e497e521e 100644
--- a/llvm/test/Transforms/InstCombine/apint-or.ll
+++ b/llvm/test/Transforms/InstCombine/apint-or.ll
@@ -20,7 +20,7 @@ define i39 @test2(i39 %V, i39 %M) {
; CHECK-LABEL: define i39 @test2(
; CHECK-SAME: i39 [[V:%.*]], i39 [[M:%.*]]) {
; CHECK-NEXT: [[N:%.*]] = and i39 [[M]], -274877906944
-; CHECK-NEXT: [[A:%.*]] = add i39 [[N]], [[V]]
+; CHECK-NEXT: [[A:%.*]] = add i39 [[V]], [[N]]
; CHECK-NEXT: ret i39 [[A]]
;
%C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
@@ -51,7 +51,7 @@ define i399 @test5(i399 %V, i399 %M) {
; CHECK-LABEL: define i399 @test5(
; CHECK-SAME: i399 [[V:%.*]], i399 [[M:%.*]]) {
; CHECK-NEXT: [[N:%.*]] = and i399 [[M]], 18446742974197923840
-; CHECK-NEXT: [[A:%.*]] = add i399 [[N]], [[V]]
+; CHECK-NEXT: [[A:%.*]] = add i399 [[V]], [[N]]
; CHECK-NEXT: ret i399 [[A]]
;
%C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
diff --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll
index 05c3db70ce1ca..dfcf0befe39f0 100644
--- a/llvm/test/Transforms/InstCombine/apint-shift.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shift.ll
@@ -240,8 +240,8 @@ define i23 @test11(i23 %x) {
define i47 @test12(i47 %X) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: [[SH2:%.*]] = and i47 [[X:%.*]], -256
-; CHECK-NEXT: ret i47 [[SH2]]
+; CHECK-NEXT: [[SH1:%.*]] = and i47 [[X:%.*]], -256
+; CHECK-NEXT: ret i47 [[SH1]]
;
%sh1 = ashr i47 %X, 8
%sh2 = shl i47 %sh1, 8
@@ -250,8 +250,8 @@ define i47 @test12(i47 %X) {
define <2 x i47> @test12_splat_vec(<2 x i47> %X) {
; CHECK-LABEL: @test12_splat_vec(
-; CHECK-NEXT: [[SH2:%.*]] = and <2 x i47> [[X:%.*]], <i47 -256, i47 -256>
-; CHECK-NEXT: ret <2 x i47> [[SH2]]
+; CHECK-NEXT: [[SH1:%.*]] = and <2 x i47> [[X:%.*]], <i47 -256, i47 -256>
+; CHECK-NEXT: ret <2 x i47> [[SH1]]
;
%sh1 = ashr <2 x i47> %X, <i47 8, i47 8>
%sh2 = shl <2 x i47> %sh1, <i47 8, i47 8>
@@ -538,7 +538,7 @@ define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> %A) {
define i37 @test25(i37 %AA, i37 %BB) {
; CHECK-LABEL: @test25(
; CHECK-NEXT: [[D:%.*]] = and i37 [[AA:%.*]], -131072
-; CHECK-NEXT: [[C2:%.*]] = add i37 [[D]], [[BB:%.*]]
+; CHECK-NEXT: [[C2:%.*]] = add i37 [[BB:%.*]], [[D]]
; CHECK-NEXT: [[F:%.*]] = and i37 [[C2]], -131072
; CHECK-NEXT: ret i37 [[F]]
;
diff --git a/llvm/test/Transforms/InstCombine/apint-sub.ll b/llvm/test/Transforms/InstCombine/apint-sub.ll
index 1c0374d443740..e9abe1a7e627d 100644
--- a/llvm/test/Transforms/InstCombine/apint-sub.ll
+++ b/llvm/test/Transforms/InstCombine/apint-sub.ll
@@ -50,7 +50,7 @@ define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) {
define i57 @test6(i57 %A, i57 %B) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i57 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = and i57 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and i57 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i57 [[D]]
;
%C = and i57 %A, %B
diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index 798707f317d29..ce3195d50be7c 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -88,7 +88,7 @@ define void @f3(i64 %a, ptr %b) {
; CHECK-LABEL: @f3(
; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i64 4294967296) ]
-; CHECK-NEXT: [[D:%.*]] = add i64 [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = add i64 [[A:%.*]], [[C]]
; CHECK-NEXT: call void @g(i64 [[D]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/InstCombine/assume-separate_storage.ll b/llvm/test/Transforms/InstCombine/assume-separate_storage.ll
index 8fa8c3e80786d..b94c303e5a70c 100644
--- a/llvm/test/Transforms/InstCombine/assume-separate_storage.ll
+++ b/llvm/test/Transforms/InstCombine/assume-separate_storage.ll
@@ -24,7 +24,7 @@ define i64 @folds_removed_operands(ptr %a, ptr %b, i64 %n1, i64 %n2) {
; CHECK-LABEL: @folds_removed_operands(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i64 [[N2:%.*]], 1
-; CHECK-NEXT: [[Y:%.*]] = add i64 [[REASS_ADD]], [[N1:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = add i64 [[N1:%.*]], [[REASS_ADD]]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "separate_storage"(ptr [[A:%.*]], ptr [[B:%.*]]) ]
; CHECK-NEXT: ret i64 [[Y]]
;
diff --git a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
index f776dc13bb4e5..4b5de41fc7095 100644
--- a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
+++ b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll
@@ -77,7 +77,7 @@ define i8 @shl_and_and_fail2(i8 %x, i8 %y) {
define <2 x i8> @lshr_and_or(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @lshr_and_or(
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], <i8 -64, i8 96>
-; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], <i8 4, i8 5>
; CHECK-NEXT: ret <2 x i8> [[BW1]]
;
@@ -106,7 +106,7 @@ define <2 x i8> @lshr_and_or_fail(<2 x i8> %x, <2 x i8> %y) {
define i8 @shl_and_xor(i8 %x, i8 %y) {
; CHECK-LABEL: @shl_and_xor(
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], 10
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1
; CHECK-NEXT: ret i8 [[BW1]]
;
@@ -120,7 +120,7 @@ define i8 @shl_and_xor(i8 %x, i8 %y) {
define i8 @shl_and_add(i8 %x, i8 %y) {
; CHECK-LABEL: @shl_and_add(
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y:%.*]], 59
-; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1
; CHECK-NEXT: ret i8 [[BW1]]
;
@@ -149,7 +149,7 @@ define i8 @shl_xor_add_fail(i8 %x, i8 %y) {
define i8 @lshr_or_and(i8 %x, i8 %y) {
; CHECK-LABEL: @lshr_or_and(
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[X:%.*]], -64
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = lshr i8 [[TMP2]], 5
; CHECK-NEXT: ret i8 [[BW1]]
;
@@ -177,7 +177,7 @@ define i8 @lshr_or_or_fail(i8 %x, i8 %y) {
define <2 x i8> @shl_xor_and(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @shl_xor_and(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 11, i8 poison>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], <i8 2, i8 poison>
; CHECK-NEXT: ret <2 x i8> [[BW1]]
;
@@ -307,7 +307,7 @@ define i8 @lshr_add_add_no_const_fail(i8 %x, i8 %y, i8 %sh, i8 %mask) {
define <2 x i8> @lshr_add_and(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @lshr_add_and(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 -8, i8 16>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], <i8 3, i8 4>
; CHECK-NEXT: ret <2 x i8> [[BW1]]
;
@@ -393,7 +393,7 @@ define i8 @lshr_xor_or_fail_bad_mask(i8 %x, i8 %y) {
define <2 x i8> @lshr_or_xor_good_mask(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @lshr_or_xor_good_mask(
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i8> [[Y:%.*]], <i8 -64, i8 64>
-; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], <i8 6, i8 6>
; CHECK-NEXT: ret <2 x i8> [[BW1]]
;
@@ -450,7 +450,7 @@ define i8 @shl_xor_xor_bad_mask_distribute(i8 %x, i8 %y) {
define i8 @shl_add_and(i8 %x, i8 %y) {
; CHECK-LABEL: @shl_add_and(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], 61
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1
; CHECK-NEXT: ret i8 [[BW1]]
;
@@ -509,7 +509,7 @@ define i8 @lshr_add_xor_fail(i8 %x, i8 %y) {
define <2 x i8> @lshr_and_add(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @lshr_and_add(
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], <i8 11, i8 3>
-; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], <i8 4, i8 5>
; CHECK-NEXT: ret <2 x i8> [[BW1]]
;
@@ -555,7 +555,7 @@ define i8 @shl_add_and_fail_mismatch_shift(i8 %x, i8 %y) {
define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) {
; CHECK-LABEL: @and_ashr_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret i8 [[AND]]
;
@@ -569,7 +569,7 @@ define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) {
define i8 @and_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) {
; CHECK-LABEL: @and_ashr_not_commuted(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret i8 [[AND]]
;
@@ -634,7 +634,7 @@ define i8 @and_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) {
define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @and_ashr_not_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
@@ -648,7 +648,7 @@ define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @and_ashr_not_vec_commuted(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
@@ -662,7 +662,7 @@ define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s
define <4 x i8> @and_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @and_ashr_not_vec_poison_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
@@ -689,7 +689,7 @@ define <4 x i8> @and_ashr_not_vec_poison_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s
define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) {
; CHECK-LABEL: @or_ashr_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret i8 [[OR]]
;
@@ -703,7 +703,7 @@ define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) {
define i8 @or_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) {
; CHECK-LABEL: @or_ashr_not_commuted(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret i8 [[OR]]
;
@@ -768,7 +768,7 @@ define i8 @or_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) {
define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @or_ashr_not_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[OR]]
;
@@ -782,7 +782,7 @@ define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @or_ashr_not_vec_commuted(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[OR]]
;
@@ -796,7 +796,7 @@ define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh
define <4 x i8> @or_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) {
; CHECK-LABEL: @or_ashr_not_vec_poison_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]]
; CHECK-NEXT: ret <4 x i8> [[OR]]
;
diff --git a/llvm/test/Transforms/InstCombine/binop-cast.ll b/llvm/test/Transforms/InstCombine/binop-cast.ll
index d521a7d5a2b3a..9d3b18c5e79ed 100644
--- a/llvm/test/Transforms/InstCombine/binop-cast.ll
+++ b/llvm/test/Transforms/InstCombine/binop-cast.ll
@@ -129,7 +129,7 @@ define i32 @and_not_zext_to_sel(i32 %x, i1 %y) {
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[Y:%.*]] to i32
; CHECK-NEXT: call void @use(i32 [[ZEXT]])
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[ZEXT]], -1
-; CHECK-NEXT: [[R:%.*]] = and i32 [[NOT]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[NOT]]
; CHECK-NEXT: ret i32 [[R]]
;
%zext = zext i1 %y to i32
@@ -175,7 +175,7 @@ define i32 @or_sext_to_sel_multi_use(i32 %x, i1 %y) {
; CHECK-LABEL: @or_sext_to_sel_multi_use(
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32
; CHECK-NEXT: call void @use(i32 [[SEXT]])
-; CHECK-NEXT: [[R:%.*]] = or i32 [[SEXT]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i32 [[X:%.*]], [[SEXT]]
; CHECK-NEXT: ret i32 [[R]]
;
%sext = sext i1 %y to i32
@@ -200,7 +200,7 @@ define i32 @or_sext_to_sel_multi_use_constant_mask(i1 %y) {
define i32 @xor_sext_to_sel(i32 %x, i1 %y) {
; CHECK-LABEL: @xor_sext_to_sel(
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SEXT]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[X:%.*]], [[SEXT]]
; CHECK-NEXT: ret i32 [[R]]
;
%sext = sext i1 %y to i32
@@ -236,7 +236,7 @@ define i32 @xor_sext_to_sel_multi_use(i32 %x, i1 %y) {
; CHECK-LABEL: @xor_sext_to_sel_multi_use(
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32
; CHECK-NEXT: call void @use(i32 [[SEXT]])
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SEXT]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[X:%.*]], [[SEXT]]
; CHECK-NEXT: ret i32 [[R]]
;
%sext = sext i1 %y to i32
diff --git a/llvm/test/Transforms/InstCombine/bit-checks.ll b/llvm/test/Transforms/InstCombine/bit-checks.ll
index aea8d3465268a..a2e766af8b60a 100644
--- a/llvm/test/Transforms/InstCombine/bit-checks.ll
+++ b/llvm/test/Transforms/InstCombine/bit-checks.ll
@@ -137,7 +137,7 @@ define i32 @main3b_logical(i32 %argc) {
define i32 @main3e_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main3e_like(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -238,7 +238,7 @@ define i32 @main3d_logical(i32 %argc) {
define i32 @main3f_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main3f_like(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -355,7 +355,7 @@ define i32 @main4b_logical(i32 %argc) {
define i32 @main4e_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main4e_like(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -456,7 +456,7 @@ define i32 @main4d_logical(i32 %argc) {
define i32 @main4f_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main4f_like(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -529,7 +529,7 @@ define i32 @main5_like_logical(i32 %argc, i32 %argc2) {
define i32 @main5e_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main5e_like(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[ARGC]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -602,7 +602,7 @@ define i32 @main5c_like_logical(i32 %argc, i32 %argc2) {
define i32 @main5f_like(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main5f_like(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], [[ARGC]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -772,7 +772,7 @@ define i32 @main6d_logical(i32 %argc) {
define i32 @main7a(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main7a(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -808,10 +808,12 @@ define i32 @main7a_logical(i32 %argc, i32 %argc2, i32 %argc3) {
; B == (A & B) & D == (A & D)
define i32 @main7b(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main7b(
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
-; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[ARGC2:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[ARGC3:%.*]]
+; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]]
+; CHECK-NEXT: [[AND_COND_NOT:%.*]] = or i1 [[TOBOOL]], [[TOBOOL3]]
+; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
%and1 = and i32 %argc, %argc2
@@ -826,9 +828,9 @@ define i32 @main7b(i32 %argc, i32 %argc2, i32 %argc3) {
define i32 @main7b_logical(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main7b_logical(
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[ARGC2:%.*]]
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[ARGC3]]
+; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]]
; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -845,10 +847,12 @@ define i32 @main7b_logical(i32 %argc, i32 %argc2, i32 %argc3) {
; B == (B & A) & D == (D & A)
define i32 @main7c(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main7c(
-; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
-; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC]]
+; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]]
+; CHECK-NEXT: [[AND_COND_NOT:%.*]] = or i1 [[TOBOOL]], [[TOBOOL3]]
+; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
;
%and1 = and i32 %argc2, %argc
@@ -863,9 +867,9 @@ define i32 @main7c(i32 %argc, i32 %argc2, i32 %argc3) {
define i32 @main7c_logical(i32 %argc, i32 %argc2, i32 %argc3) {
; CHECK-LABEL: @main7c_logical(
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC:%.*]]
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]]
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC]]
-; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[ARGC3]]
+; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]]
; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -885,7 +889,7 @@ define i32 @main7d(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) {
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -905,9 +909,9 @@ define i32 @main7d_logical(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %a
; CHECK-LABEL: @main7d_logical(
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[BC]], [[ARGC:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[BC]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[BC]]
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[DE]], [[ARGC]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[DE]]
; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[DE]]
; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
@@ -930,7 +934,7 @@ define i32 @main7e(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) {
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -975,7 +979,7 @@ define i32 @main7f(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) {
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
@@ -995,9 +999,9 @@ define i32 @main7f_logical(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %a
; CHECK-LABEL: @main7f_logical(
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[BC]], [[ARGC:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[BC]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[BC]], [[AND1]]
-; CHECK-NEXT: [[AND2:%.*]] = and i32 [[DE]], [[ARGC]]
+; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[DE]]
; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[DE]], [[AND2]]
; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32
@@ -1020,7 +1024,7 @@ define i32 @main7g(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) {
; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]]
; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]]
; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32
; CHECK-NEXT: ret i32 [[STOREMERGE]]
diff --git a/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll b/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll
index 061182fdaf3c8..3744d8c9171c7 100644
--- a/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll
@@ -120,7 +120,7 @@ define <2 x i8> @canonicalize_bitcast_logic_with_constant(<4 x i4> %x) {
define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B:%.*]] to <4 x i32>
-; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <4 x i32> [[BC3]]
;
%bc1 = bitcast <4 x i32> %a to <2 x i64>
@@ -133,7 +133,7 @@ define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast_to_fp(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16>
-; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float>
; CHECK-NEXT: ret <4 x float> [[BC3]]
;
@@ -149,7 +149,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_or_bitcast(
; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 [[A:%.*]] to <2 x i64>
-; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[B:%.*]], [[BC1]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[OR]] to i128
; CHECK-NEXT: ret i128 [[BC2]]
;
@@ -164,7 +164,7 @@ define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) {
; CHECK-LABEL: @bitcast_xor_bitcast(
; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128
-; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], [[B:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[B:%.*]], [[BC1]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast i128 [[XOR]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[BC2]]
;
diff --git a/llvm/test/Transforms/InstCombine/bitcast.ll b/llvm/test/Transforms/InstCombine/bitcast.ll
index 5599604b666fb..dfea2d05ce717 100644
--- a/llvm/test/Transforms/InstCombine/bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/bitcast.ll
@@ -122,7 +122,7 @@ define <2 x i8> @canonicalize_bitcast_logic_with_constant(<4 x i4> %x) {
define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B:%.*]] to <4 x i32>
-; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <4 x i32> [[BC3]]
;
%bc1 = bitcast <4 x i32> %a to <2 x i64>
@@ -135,7 +135,7 @@ define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) {
define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_and_bitcast_to_fp(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16>
-; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float>
; CHECK-NEXT: ret <4 x float> [[BC3]]
;
@@ -149,7 +149,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
define <2 x double> @bitcasts_or_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_or_bitcast_to_fp(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16>
-; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i16> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i16> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <2 x double>
; CHECK-NEXT: ret <2 x double> [[BC3]]
;
@@ -163,7 +163,7 @@ define <2 x double> @bitcasts_or_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) {
define <4 x float> @bitcasts_xor_bitcast_to_fp(<2 x double> %a, <8 x i16> %b) {
; CHECK-LABEL: @bitcasts_xor_bitcast_to_fp(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[A:%.*]] to <8 x i16>
-; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i16> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i16> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float>
; CHECK-NEXT: ret <4 x float> [[BC3]]
;
@@ -198,7 +198,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp_multiuse(<4 x float> %a, <8 x i16
define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
; CHECK-LABEL: @bitcast_or_bitcast(
; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 [[A:%.*]] to <2 x i64>
-; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[B:%.*]], [[BC1]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[OR]] to i128
; CHECK-NEXT: ret i128 [[BC2]]
;
@@ -213,7 +213,7 @@ define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) {
define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) {
; CHECK-LABEL: @bitcast_xor_bitcast(
; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128
-; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], [[B:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[B:%.*]], [[BC1]]
; CHECK-NEXT: [[BC2:%.*]] = bitcast i128 [[XOR]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[BC2]]
;
diff --git a/llvm/test/Transforms/InstCombine/bitreverse.ll b/llvm/test/Transforms/InstCombine/bitreverse.ll
index cbe9695c48690..fe44a7a77bdff 100644
--- a/llvm/test/Transforms/InstCombine/bitreverse.ll
+++ b/llvm/test/Transforms/InstCombine/bitreverse.ll
@@ -403,7 +403,7 @@ define i64 @PR59897(i1 %X1_2) {
define i16 @rev_xor_lhs_rev16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @rev_xor_lhs_rev16(
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i16 [[TMP2]]
;
%1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
@@ -475,7 +475,7 @@ define <2 x i32> @rev_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
define i64 @rev_and_rhs_rev64_multiuse1(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_and_rhs_rev64_multiuse1(
; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i64 [[TMP4]]
@@ -490,7 +490,7 @@ define i64 @rev_and_rhs_rev64_multiuse1(i64 %a, i64 %b) #0 {
define i64 @rev_and_rhs_rev64_multiuse2(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_and_rhs_rev64_multiuse2(
; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: ret i64 [[TMP4]]
diff --git a/llvm/test/Transforms/InstCombine/bswap-fold.ll b/llvm/test/Transforms/InstCombine/bswap-fold.ll
index 91674c6017a9e..ddc0430896e7d 100644
--- a/llvm/test/Transforms/InstCombine/bswap-fold.ll
+++ b/llvm/test/Transforms/InstCombine/bswap-fold.ll
@@ -544,7 +544,7 @@ define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_and_lhs_bs16(
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i16 [[TMP2]]
;
%1 = tail call i16 @llvm.bswap.i16(i16 %a)
@@ -556,7 +556,7 @@ define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_or_lhs_bs16(
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i16 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i16 [[TMP2]]
;
%1 = tail call i16 @llvm.bswap.i16(i16 %a)
@@ -568,7 +568,7 @@ define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_xor_lhs_bs16(
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i16 [[TMP2]]
;
%1 = tail call i16 @llvm.bswap.i16(i16 %a)
@@ -724,7 +724,7 @@ define <2 x i32> @bs_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
define i64 @bs_and_rhs_bs64_multiuse1(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and_rhs_bs64_multiuse1(
; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i64 [[TMP4]]
@@ -739,7 +739,7 @@ define i64 @bs_and_rhs_bs64_multiuse1(i64 %a, i64 %b) #0 {
define i64 @bs_and_rhs_bs64_multiuse2(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and_rhs_bs64_multiuse2(
; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT: ret i64 [[TMP4]]
diff --git a/llvm/test/Transforms/InstCombine/call-guard.ll b/llvm/test/Transforms/InstCombine/call-guard.ll
index 358518b9bd1cb..6b31c78118d0b 100644
--- a/llvm/test/Transforms/InstCombine/call-guard.ll
+++ b/llvm/test/Transforms/InstCombine/call-guard.ll
@@ -80,7 +80,7 @@ define void @negative_load(i32 %V1, ptr %P) {
define void @deref_load(i32 %V1, ptr dereferenceable(4) align 4 %P) nofree nosync {
; CHECK-LABEL: @deref_load(
; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[V2]], [[V1:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[V1:%.*]], [[V2]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 123) [ "deopt"() ]
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
index 759770688cf20..3d5696a024513 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
@@ -27,7 +27,7 @@ define i1 @p0(i8 %x) {
define i1 @pv(i8 %x, i8 %y) {
; CHECK-LABEL: @pv(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
index 9b28129dd9e17..21daeb8983a85 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
@@ -27,7 +27,7 @@ define i1 @p0(i8 %x) {
define i1 @pv(i8 %x, i8 %y) {
; CHECK-LABEL: @pv(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll
index cfd48821b2c1d..1dac73df38789 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll
@@ -27,7 +27,7 @@ define i1 @p0(i8 %x) {
define i1 @pv(i8 %x, i8 %y) {
; CHECK-LABEL: @pv(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
index 70fb34f499289..7eda7bb58f270 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
@@ -27,7 +27,7 @@ define i1 @p0(i8 %x) {
define i1 @pv(i8 %x, i8 %y) {
; CHECK-LABEL: @pv(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
index dc5658d302d99..5a58fc96c6643 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
@@ -15,7 +15,7 @@
define i1 @p0(i8 %x, i8 %y) {
; CHECK-LABEL: @p0(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -31,7 +31,7 @@ define i1 @p0(i8 %x, i8 %y) {
define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @p1_vec(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
%tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: @p2_vec_poison(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> <i8 -1, i8 poison, i8 -1>, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <3 x i8> [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
%tmp0 = lshr <3 x i8> <i8 -1, i8 poison, i8 -1>, %y
@@ -110,7 +110,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse0(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -125,7 +125,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -141,7 +141,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
index 8fbbd2bb9907d..edd528b500e55 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
@@ -15,7 +15,7 @@
define i1 @p0(i8 %x, i8 %y) {
; CHECK-LABEL: @p0(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -31,7 +31,7 @@ define i1 @p0(i8 %x, i8 %y) {
define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @p1_vec(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
%tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) {
; CHECK-LABEL: @p2_vec_poison(
; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> <i8 -1, i8 poison, i8 -1>, [[Y:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
%tmp0 = lshr <3 x i8> <i8 -1, i8 poison, i8 -1>, %y
@@ -110,7 +110,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse0(
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -125,7 +125,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
@@ -141,7 +141,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[TMP0]]
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
index 88487b38e2c70..19964ed8699ae 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
@@ -161,7 +161,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -176,9 +176,9 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -195,7 +195,7 @@ define i1 @oneuse3(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -212,9 +212,9 @@ define i1 @oneuse4(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -232,9 +232,9 @@ define i1 @oneuse5(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -255,7 +255,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[NOTX:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -270,7 +270,7 @@ define i1 @n1(i8 %x, i8 %y) {
; CHECK-LABEL: @n1(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
; CHECK-NEXT: ret i1 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
index b717925fd644f..e5e6e9302343f 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
@@ -161,7 +161,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -176,9 +176,9 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-LABEL: @oneuse2(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -195,7 +195,7 @@ define i1 @oneuse3(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -212,9 +212,9 @@ define i1 @oneuse4(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -232,9 +232,9 @@ define i1 @oneuse5(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -255,7 +255,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[NOTX:%.*]]
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -270,7 +270,7 @@ define i1 @n1(i8 %x, i8 %y) {
; CHECK-LABEL: @n1(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
; CHECK-NEXT: ret i1 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
index a65be1e9ceeca..1a6e147426df5 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
@@ -174,7 +174,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
@@ -193,7 +193,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
@@ -213,7 +213,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
index f156d9bf007cb..96b844cd3c22e 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
@@ -174,7 +174,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
@@ -193,7 +193,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
@@ -213,7 +213,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
index f48d284e085bc..5fab93092a050 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
@@ -23,7 +23,7 @@ define i1 @p0(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
%t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
@@ -59,7 +59,7 @@ define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> <i8 -1, i8 poison, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <3 x i8> [[X:%.*]], [[T1]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
%t0 = shl <3 x i8> <i8 -1, i8 poison, i8 -1>, %y
@@ -140,7 +140,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -159,7 +159,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -179,7 +179,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
index f4b3c67164e49..40a67ce1d60cb 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
@@ -23,7 +23,7 @@ define i1 @p0(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[RET]]
;
%t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
@@ -59,7 +59,7 @@ define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> <i8 -1, i8 poison, i8 -1>, [[Y:%.*]]
; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> <i8 -1, i8 -1, i8 -1>, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], [[T1]]
; CHECK-NEXT: ret <3 x i1> [[RET]]
;
%t0 = shl <3 x i8> <i8 -1, i8 poison, i8 -1>, %y
@@ -140,7 +140,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -159,7 +159,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
@@ -179,7 +179,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
; CHECK-NEXT: ret i1 [[RET]]
;
%t0 = shl i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/cast-mul-select.ll b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
index 50769ebe76f5c..e40f57e9663b4 100644
--- a/llvm/test/Transforms/InstCombine/cast-mul-select.ll
+++ b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
@@ -13,12 +13,12 @@ define i32 @mul(i32 %x, i32 %y) {
; CHECK-NEXT: ret i32 [[D]]
;
; DBGINFO-LABEL: @mul(
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[X:%.*]], metadata [[META9:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG15:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[Y:%.*]], metadata [[META11:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG16:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X:%.*]], metadata [[META9:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG15:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[Y:%.*]], metadata [[META11:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG16:![0-9]+]]
; DBGINFO-NEXT: [[C:%.*]] = mul i32 [[X]], [[Y]], !dbg [[DBG17:![0-9]+]]
; DBGINFO-NEXT: [[D:%.*]] = and i32 [[C]], 255, !dbg [[DBG18:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[C]], metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG17]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[D]], metadata [[META13:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[C]], metadata [[META12:![0-9]+]], metadata !DIExpression()), !dbg [[DBG17]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[D]], metadata [[META13:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
; DBGINFO-NEXT: ret i32 [[D]], !dbg [[DBG19:![0-9]+]]
;
@@ -41,15 +41,15 @@ define i32 @select1(i1 %cond, i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret i32 [[F]]
;
; DBGINFO-LABEL: @select1(
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[X:%.*]], metadata [[META22:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG28:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[Y:%.*]], metadata [[META23:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG29:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[Z:%.*]], metadata [[META24:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG30:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[X:%.*]], metadata [[META22:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG28:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[Y:%.*]], metadata [[META23:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG29:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[Z:%.*]], metadata [[META24:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG30:![0-9]+]]
; DBGINFO-NEXT: [[D:%.*]] = add i32 [[X]], [[Y]], !dbg [[DBG31:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata !DIArgList(i32 [[X]], i32 [[Y]]), metadata [[META25:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_arg, 1, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_plus, DW_OP_stack_value)), !dbg [[DBG31]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata !DIArgList(i32 [[X]], i32 [[Y]]), metadata [[META25:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_arg, 1, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_plus, DW_OP_stack_value)), !dbg [[DBG31]]
; DBGINFO-NEXT: [[E:%.*]] = select i1 [[COND:%.*]], i32 [[Z]], i32 [[D]], !dbg [[DBG32:![0-9]+]]
; DBGINFO-NEXT: [[F:%.*]] = and i32 [[E]], 255, !dbg [[DBG33:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[E]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG32]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[F]], metadata [[META27:![0-9]+]], metadata !DIExpression()), !dbg [[DBG33]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[E]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG32]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[F]], metadata [[META27:![0-9]+]], metadata !DIExpression()), !dbg [[DBG33]]
; DBGINFO-NEXT: ret i32 [[F]], !dbg [[DBG34:![0-9]+]]
;
%A = trunc i32 %x to i8
@@ -68,14 +68,14 @@ define i8 @select2(i1 %cond, i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: ret i8 [[E]]
;
; DBGINFO-LABEL: @select2(
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[X:%.*]], metadata [[META37:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG43:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[Y:%.*]], metadata [[META38:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG44:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[Z:%.*]], metadata [[META39:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG45:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[X:%.*]], metadata [[META37:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG43:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[Y:%.*]], metadata [[META38:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG44:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[Z:%.*]], metadata [[META39:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG45:![0-9]+]]
; DBGINFO-NEXT: [[D:%.*]] = add i8 [[X]], [[Y]], !dbg [[DBG46:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata !DIArgList(i8 [[X]], i8 [[Y]]), metadata [[META40:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_arg, 1, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_plus, DW_OP_stack_value)), !dbg [[DBG46]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata !DIArgList(i8 [[X]], i8 [[Y]]), metadata [[META40:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_LLVM_arg, 1, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_LLVM_convert, 32, DW_ATE_unsigned, DW_OP_plus, DW_OP_stack_value)), !dbg [[DBG46]]
; DBGINFO-NEXT: [[E:%.*]] = select i1 [[COND:%.*]], i8 [[Z]], i8 [[D]], !dbg [[DBG47:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 poison, metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[E]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 poison, metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[E]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG48:![0-9]+]]
; DBGINFO-NEXT: ret i8 [[E]], !dbg [[DBG49:![0-9]+]]
;
%A = zext i8 %x to i32
@@ -100,13 +100,13 @@ define i32 @eval_trunc_multi_use_in_one_inst(i32 %x) {
;
; DBGINFO-LABEL: @eval_trunc_multi_use_in_one_inst(
; DBGINFO-NEXT: [[Z:%.*]] = zext i32 [[X:%.*]] to i64, !dbg [[DBG57:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i64 [[Z]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG57]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i64 [[Z]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG57]]
; DBGINFO-NEXT: [[A:%.*]] = add nuw nsw i64 [[Z]], 15, !dbg [[DBG58:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i64 [[A]], metadata [[META54:![0-9]+]], metadata !DIExpression()), !dbg [[DBG58]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i64 [[A]], metadata [[META54:![0-9]+]], metadata !DIExpression()), !dbg [[DBG58]]
; DBGINFO-NEXT: [[M:%.*]] = mul i64 [[A]], [[A]], !dbg [[DBG59:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i64 [[M]], metadata [[META55:![0-9]+]], metadata !DIExpression()), !dbg [[DBG59]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i64 [[M]], metadata [[META55:![0-9]+]], metadata !DIExpression()), !dbg [[DBG59]]
; DBGINFO-NEXT: [[T:%.*]] = trunc i64 [[M]] to i32, !dbg [[DBG60:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[T]], metadata [[META56:![0-9]+]], metadata !DIExpression()), !dbg [[DBG60]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[T]], metadata [[META56:![0-9]+]], metadata !DIExpression()), !dbg [[DBG60]]
; DBGINFO-NEXT: ret i32 [[T]], !dbg [[DBG61:![0-9]+]]
;
%z = zext i32 %x to i64
@@ -126,13 +126,13 @@ define i32 @eval_zext_multi_use_in_one_inst(i32 %x) {
;
; DBGINFO-LABEL: @eval_zext_multi_use_in_one_inst(
; DBGINFO-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i16, !dbg [[DBG69:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[T]], metadata [[META64:![0-9]+]], metadata !DIExpression()), !dbg [[DBG69]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[T]], metadata [[META64:![0-9]+]], metadata !DIExpression()), !dbg [[DBG69]]
; DBGINFO-NEXT: [[A:%.*]] = and i16 [[T]], 5, !dbg [[DBG70:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG70]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG70]]
; DBGINFO-NEXT: [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]], !dbg [[DBG71:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71]]
; DBGINFO-NEXT: [[R:%.*]] = zext nneg i16 [[M]] to i32, !dbg [[DBG72:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG72]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG72]]
; DBGINFO-NEXT: ret i32 [[R]], !dbg [[DBG73:![0-9]+]]
;
%t = trunc i32 %x to i16
@@ -153,15 +153,15 @@ define i32 @eval_sext_multi_use_in_one_inst(i32 %x) {
;
; DBGINFO-LABEL: @eval_sext_multi_use_in_one_inst(
; DBGINFO-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i16, !dbg [[DBG81:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[T]], metadata [[META76:![0-9]+]], metadata !DIExpression()), !dbg [[DBG81]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[T]], metadata [[META76:![0-9]+]], metadata !DIExpression()), !dbg [[DBG81]]
; DBGINFO-NEXT: [[A:%.*]] = and i16 [[T]], 14, !dbg [[DBG82:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG82]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG82]]
; DBGINFO-NEXT: [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]], !dbg [[DBG83:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META78:![0-9]+]], metadata !DIExpression()), !dbg [[DBG83]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META78:![0-9]+]], metadata !DIExpression()), !dbg [[DBG83]]
; DBGINFO-NEXT: [[O:%.*]] = or disjoint i16 [[M]], -32768, !dbg [[DBG84:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[O]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG84]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i16 [[O]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG84]]
; DBGINFO-NEXT: [[R:%.*]] = sext i16 [[O]] to i32, !dbg [[DBG85:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG85]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG85]]
; DBGINFO-NEXT: ret i32 [[R]], !dbg [[DBG86:![0-9]+]]
;
%t = trunc i32 %x to i16
@@ -196,7 +196,7 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
; CHECK: for.end:
; CHECK-NEXT: [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ]
; CHECK-NEXT: [[CONV:%.*]] = zext nneg i8 [[H]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], [[CONV]]
; CHECK-NEXT: br i1 [[CMP]], label [[EXIT]], label [[EXIT2:%.*]]
; CHECK: exit2:
; CHECK-NEXT: unreachable
@@ -207,13 +207,13 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
; DBGINFO-NEXT: entry:
; DBGINFO-NEXT: br label [[WHILE_BODY:%.*]], !dbg [[DBG94:![0-9]+]]
; DBGINFO: while.body:
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[B:%.*]], metadata [[META89:![0-9]+]], metadata !DIExpression(DW_OP_constu, 0, DW_OP_eq, DW_OP_stack_value)), !dbg [[DBG95:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[B:%.*]], metadata [[META89:![0-9]+]], metadata !DIExpression(DW_OP_constu, 0, DW_OP_eq, DW_OP_stack_value)), !dbg [[DBG95:![0-9]+]]
; DBGINFO-NEXT: br i1 [[C1:%.*]], label [[FOR_BODY3_US:%.*]], label [[FOR_BODY3:%.*]], !dbg [[DBG96:![0-9]+]]
; DBGINFO: for.body3.us:
; DBGINFO-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[B]], 0, !dbg [[DBG95]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i1 [[TOBOOL]], metadata [[META89]], metadata !DIExpression()), !dbg [[DBG95]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i1 [[TOBOOL]], metadata [[META89]], metadata !DIExpression()), !dbg [[DBG95]]
; DBGINFO-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TOBOOL]], i8 0, i8 4, !dbg [[DBG97:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[SPEC_SELECT]], metadata [[META90:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[SPEC_SELECT]], metadata [[META90:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97]]
; DBGINFO-NEXT: switch i3 [[V1:%.*]], label [[EXIT:%.*]] [
; DBGINFO-NEXT: i3 0, label [[FOR_END:%.*]]
; DBGINFO-NEXT: i3 -1, label [[FOR_END]]
@@ -225,11 +225,11 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
; DBGINFO-NEXT: ], !dbg [[DBG99:![0-9]+]]
; DBGINFO: for.end:
; DBGINFO-NEXT: [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ], !dbg [[DBG100:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i8 [[H]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG100]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i8 [[H]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG100]]
; DBGINFO-NEXT: [[CONV:%.*]] = zext nneg i8 [[H]] to i32, !dbg [[DBG101:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[CONV]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG101]]
-; DBGINFO-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]], !dbg [[DBG102:![0-9]+]]
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i1 [[CMP]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG102]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i32 [[CONV]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG101]]
+; DBGINFO-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], [[CONV]], !dbg [[DBG102:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i1 [[CMP]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG102]]
; DBGINFO-NEXT: br i1 [[CMP]], label [[EXIT]], label [[EXIT2:%.*]], !dbg [[DBG103:![0-9]+]]
; DBGINFO: exit2:
; DBGINFO-NEXT: unreachable, !dbg [[DBG104:![0-9]+]]
@@ -275,7 +275,7 @@ define i1 @foo(i1 zeroext %b) {
; CHECK-NEXT: ret i1 [[B:%.*]]
;
; DBGINFO-LABEL: @foo(
-; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i1 [[B:%.*]], metadata [[META108:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 1, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG109:![0-9]+]]
+; DBGINFO-NEXT: tail call void @llvm.dbg.value(metadata i1 [[B:%.*]], metadata [[META108:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_convert, 1, DW_ATE_unsigned, DW_OP_LLVM_convert, 8, DW_ATE_unsigned, DW_OP_stack_value)), !dbg [[DBG109:![0-9]+]]
; DBGINFO-NEXT: ret i1 [[B]], !dbg [[DBG110:![0-9]+]]
;
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 04a3e8931e62c..4777e39782338 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -512,8 +512,8 @@ define <2 x i16> @test40vec_poison(<2 x i16> %a) {
; ALL-LABEL: @test40vec_poison(
; ALL-NEXT: [[T21:%.*]] = lshr <2 x i16> [[A:%.*]], <i16 9, i16 poison>
; ALL-NEXT: [[T5:%.*]] = shl <2 x i16> [[A]], <i16 8, i16 poison>
-; ALL-NEXT: [[R:%.*]] = or disjoint <2 x i16> [[T21]], [[T5]]
-; ALL-NEXT: ret <2 x i16> [[R]]
+; ALL-NEXT: [[T32:%.*]] = or disjoint <2 x i16> [[T21]], [[T5]]
+; ALL-NEXT: ret <2 x i16> [[T32]]
;
%t = zext <2 x i16> %a to <2 x i32>
%t21 = lshr <2 x i32> %t, <i32 9, i32 poison>
@@ -587,7 +587,7 @@ define i64 @test44(i8 %T) {
define i64 @test45(i8 %A, i64 %Q) {
; ALL-LABEL: @test45(
; ALL-NEXT: [[B:%.*]] = sext i8 [[A:%.*]] to i64
-; ALL-NEXT: [[C:%.*]] = or i64 [[B]], [[Q:%.*]]
+; ALL-NEXT: [[C:%.*]] = or i64 [[Q:%.*]], [[B]]
; ALL-NEXT: [[E:%.*]] = and i64 [[C]], 4294967295
; ALL-NEXT: ret i64 [[E]]
;
@@ -1165,10 +1165,10 @@ define %s @test78(ptr %p, i64 %i, i64 %j, i32 %k, i32 %l, i128 %m, i128 %n) {
; ALL-NEXT: [[A:%.*]] = mul nsw i32 [[K:%.*]], 36
; ALL-NEXT: [[B:%.*]] = mul nsw i32 [[A]], [[L:%.*]]
; ALL-NEXT: [[C:%.*]] = sext i32 [[B]] to i128
-; ALL-NEXT: [[D:%.*]] = mul nsw i128 [[C]], [[M:%.*]]
+; ALL-NEXT: [[D:%.*]] = mul nsw i128 [[M:%.*]], [[C]]
; ALL-NEXT: [[E:%.*]] = mul i128 [[D]], [[N:%.*]]
; ALL-NEXT: [[F:%.*]] = trunc i128 [[E]] to i64
-; ALL-NEXT: [[G:%.*]] = mul nsw i64 [[F]], [[I:%.*]]
+; ALL-NEXT: [[G:%.*]] = mul nsw i64 [[I:%.*]], [[F]]
; ALL-NEXT: [[H:%.*]] = mul nsw i64 [[G]], [[J:%.*]]
; ALL-NEXT: [[PP:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 [[H]]
; ALL-NEXT: [[LOAD:%.*]] = load [[S:%.*]], ptr [[PP]], align 4
@@ -2014,8 +2014,8 @@ define <2 x i8> @trunc_lshr_zext_uniform(<2 x i8> %A) {
define <2 x i8> @trunc_lshr_zext_uniform_poison(<2 x i8> %A) {
; ALL-LABEL: @trunc_lshr_zext_uniform_poison(
-; ALL-NEXT: [[D:%.*]] = lshr <2 x i8> [[A:%.*]], <i8 6, i8 poison>
-; ALL-NEXT: ret <2 x i8> [[D]]
+; ALL-NEXT: [[C:%.*]] = lshr <2 x i8> [[A:%.*]], <i8 6, i8 poison>
+; ALL-NEXT: ret <2 x i8> [[C]]
;
%B = zext <2 x i8> %A to <2 x i32>
%C = lshr <2 x i32> %B, <i32 6, i32 poison>
@@ -2036,8 +2036,8 @@ define <2 x i8> @trunc_lshr_zext_nonuniform(<2 x i8> %A) {
define <3 x i8> @trunc_lshr_zext_nonuniform_poison(<3 x i8> %A) {
; ALL-LABEL: @trunc_lshr_zext_nonuniform_poison(
-; ALL-NEXT: [[D:%.*]] = lshr <3 x i8> [[A:%.*]], <i8 6, i8 2, i8 poison>
-; ALL-NEXT: ret <3 x i8> [[D]]
+; ALL-NEXT: [[C:%.*]] = lshr <3 x i8> [[A:%.*]], <i8 6, i8 2, i8 poison>
+; ALL-NEXT: ret <3 x i8> [[C]]
;
%B = zext <3 x i8> %A to <3 x i32>
%C = lshr <3 x i32> %B, <i32 6, i32 2, i32 poison>
diff --git a/llvm/test/Transforms/InstCombine/cast_phi.ll b/llvm/test/Transforms/InstCombine/cast_phi.ll
index 68847e73ac5d2..7dfe60539138d 100644
--- a/llvm/test/Transforms/InstCombine/cast_phi.ll
+++ b/llvm/test/Transforms/InstCombine/cast_phi.ll
@@ -350,7 +350,7 @@ define i32 @zext_in_loop_and_exit_block(i8 %step, i32 %end) {
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i32
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[IV_EXT]], [[END:%.*]]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[END:%.*]], [[IV_EXT]]
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], [[STEP:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll b/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
index c5ff0f90fdaee..96b03e6cd054c 100644
--- a/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
+++ b/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
@@ -132,7 +132,7 @@ define i1 @t9(i8 %x) {
define i1 @n10(i8 %x) {
; CHECK-LABEL: @n10(
; CHECK-NEXT: [[NEG_X:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[NEG_X]], [[X]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X]], [[NEG_X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%neg_x = sub i8 0, %x ; not nsw
@@ -154,7 +154,7 @@ define i1 @n11(i8 %x) {
define i1 @n12(i8 %x1, i8 %x2) {
; CHECK-LABEL: @n12(
; CHECK-NEXT: [[NEG_X:%.*]] = sub nsw i8 0, [[X1:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[NEG_X]], [[X2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X2:%.*]], [[NEG_X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%neg_x = sub nsw i8 0, %x1 ; not %x2
diff --git a/llvm/test/Transforms/InstCombine/conditional-negation.ll b/llvm/test/Transforms/InstCombine/conditional-negation.ll
index 1bdfd76edb341..0ae1af8f8e67f 100644
--- a/llvm/test/Transforms/InstCombine/conditional-negation.ll
+++ b/llvm/test/Transforms/InstCombine/conditional-negation.ll
@@ -44,7 +44,7 @@ define i8 @t2(i8 %x, i1 %cond0, i1 %cond1) {
; CHECK-LABEL: @t2(
; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND0:%.*]] to i8
; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND1:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
; CHECK-NEXT: ret i8 [[XOR]]
;
@@ -59,7 +59,7 @@ define i8 @t2(i8 %x, i1 %cond0, i1 %cond1) {
define i8 @t3(i8 %x, i2 %cond) {
; CHECK-LABEL: @t3(
; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i2 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
; CHECK-NEXT: ret i8 [[XOR]]
;
@@ -71,7 +71,7 @@ define i8 @t3(i8 %x, i2 %cond) {
define <2 x i8> @t3_vec(<2 x i8> %x, <2 x i2> %cond) {
; CHECK-LABEL: @t3_vec(
; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext <2 x i2> [[COND:%.*]] to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[COND_SPLAT]]
; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[SUB]], [[COND_SPLAT]]
; CHECK-NEXT: ret <2 x i8> [[XOR]]
;
@@ -115,7 +115,7 @@ define i8 @extrause01_v1(i8 %x, i1 %cond) {
define i8 @extrause10_v1(i8 %x, i1 %cond) {
; CHECK-LABEL: @extrause10_v1(
; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i1 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
; CHECK-NEXT: ret i8 [[XOR]]
@@ -130,7 +130,7 @@ define i8 @extrause11_v1(i8 %x, i1 %cond) {
; CHECK-LABEL: @extrause11_v1(
; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i1 [[COND:%.*]] to i8
; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
; CHECK-NEXT: ret i8 [[XOR]]
@@ -195,7 +195,7 @@ define i8 @extrause011_v2(i8 %x, i1 %cond) {
define i8 @extrause100_v2(i8 %x, i1 %cond) {
; CHECK-LABEL: @extrause100_v2(
; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[X_NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND]], i8 [[X_NEG]], i8 [[X]]
@@ -212,7 +212,7 @@ define i8 @extrause101_v2(i8 %x, i1 %cond) {
; CHECK-LABEL: @extrause101_v2(
; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT0]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[X_NEG:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND]], i8 [[X_NEG]], i8 [[X]]
@@ -231,7 +231,7 @@ define i8 @extrause110_v2(i8 %x, i1 %cond) {
; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND]] to i8
; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT1]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
; CHECK-NEXT: ret i8 [[XOR]]
@@ -250,7 +250,7 @@ define i8 @extrause111_v2(i8 %x, i1 %cond) {
; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT0]])
; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND]] to i8
; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT1]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
; CHECK-NEXT: ret i8 [[XOR]]
diff --git a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
index a505654fa96e7..bcfbce8dfd3d2 100644
--- a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
@@ -33,7 +33,7 @@ define <2 x i32> @ctpop1v(<2 x i32> %0) {
define i32 @ctpop1_multiuse(i32 %0) {
; CHECK-LABEL: @ctpop1_multiuse(
; CHECK-NEXT: [[TMP2:%.*]] = sub i32 0, [[TMP0:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
; CHECK-NEXT: [[TMP5:%.*]] = call range(i32 0, 33) i32 @llvm.ctpop.i32(i32 [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[TMP3]]
diff --git a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
index 7facdaf7590d3..8956f76910d28 100644
--- a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
@@ -12,7 +12,7 @@ declare void @llvm.assume(i1)
define i16 @ctpop_x_and_negx(i16 %x) {
; CHECK-LABEL: @ctpop_x_and_negx(
; CHECK-NEXT: [[V0:%.*]] = sub i16 0, [[X:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = and i16 [[V0]], [[X]]
+; CHECK-NEXT: [[V1:%.*]] = and i16 [[X]], [[V0]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i16 [[V1]], 0
; CHECK-NEXT: [[CNT:%.*]] = zext i1 [[TMP1]] to i16
; CHECK-NEXT: ret i16 [[CNT]]
@@ -74,7 +74,7 @@ define i8 @ctpop_imin_plus1_lshr_nz(i8 %x) {
define i64 @ctpop_x_and_negx_nz(i64 %x) {
; CHECK-LABEL: @ctpop_x_and_negx_nz(
; CHECK-NEXT: [[V0:%.*]] = sub i64 0, [[X:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = and i64 [[V0]], [[X]]
+; CHECK-NEXT: [[V1:%.*]] = and i64 [[X]], [[V0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[V1]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: ret i64 1
@@ -127,7 +127,7 @@ define <2 x i32> @ctpop_shl2_1_vec_nz(<2 x i32> %x) {
define <2 x i64> @ctpop_x_and_negx_vec(<2 x i64> %x) {
; CHECK-LABEL: @ctpop_x_and_negx_vec(
; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[SUB]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[X]], [[SUB]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i64> [[AND]], zeroinitializer
; CHECK-NEXT: [[CNT:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[CNT]]
diff --git a/llvm/test/Transforms/InstCombine/cttz.ll b/llvm/test/Transforms/InstCombine/cttz.ll
index 66b7a03fe5d7b..e106faf9cb38f 100644
--- a/llvm/test/Transforms/InstCombine/cttz.ll
+++ b/llvm/test/Transforms/InstCombine/cttz.ll
@@ -193,7 +193,7 @@ define i32 @cttz_of_lowest_set_bit_wrong_const(i32 %x) {
define i32 @cttz_of_lowest_set_bit_wrong_operand(i32 %x, i32 %y) {
; CHECK-LABEL: @cttz_of_lowest_set_bit_wrong_operand(
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[SUB]]
; CHECK-NEXT: [[TZ:%.*]] = call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[AND]], i1 false)
; CHECK-NEXT: ret i32 [[TZ]]
;
@@ -206,7 +206,7 @@ define i32 @cttz_of_lowest_set_bit_wrong_operand(i32 %x, i32 %y) {
define i32 @cttz_of_lowest_set_bit_wrong_intrinsic(i32 %x) {
; CHECK-LABEL: @cttz_of_lowest_set_bit_wrong_intrinsic(
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], [[SUB]]
; CHECK-NEXT: [[TZ:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[AND]], i1 false)
; CHECK-NEXT: ret i32 [[TZ]]
;
diff --git a/llvm/test/Transforms/InstCombine/demorgan.ll b/llvm/test/Transforms/InstCombine/demorgan.ll
index c9196b6d49aff..460758d512bb3 100644
--- a/llvm/test/Transforms/InstCombine/demorgan.ll
+++ b/llvm/test/Transforms/InstCombine/demorgan.ll
@@ -119,8 +119,8 @@ define i32 @test3(i32 %A, i32 %B) {
define i32 @test4(i32 %A) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[NOTC1:%.*]] = or i32 [[A:%.*]], -6
-; CHECK-NEXT: ret i32 [[NOTC1]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i32 [[A:%.*]], -6
+; CHECK-NEXT: ret i32 [[NOTC]]
;
%nota = xor i32 %A, -1
%c = and i32 %nota, 5
@@ -191,7 +191,7 @@ define i71 @test5_apint(i71 %A, i71 %B) {
define i8 @demorgan_nand(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nand(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i8 [[NOTC]]
;
%notx = xor i8 %A, -1
@@ -205,7 +205,7 @@ define i8 @demorgan_nand(i8 %A, i8 %B) {
define i7 @demorgan_nand_apint1(i7 %A, i7 %B) {
; CHECK-LABEL: @demorgan_nand_apint1(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i7 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i7 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i7 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i7 [[NOTC]]
;
%nota = xor i7 %A, -1
@@ -219,7 +219,7 @@ define i7 @demorgan_nand_apint1(i7 %A, i7 %B) {
define i117 @demorgan_nand_apint2(i117 %A, i117 %B) {
; CHECK-LABEL: @demorgan_nand_apint2(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i117 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i117 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i117 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i117 [[NOTC]]
;
%nota = xor i117 %A, -1
@@ -233,7 +233,7 @@ define i117 @demorgan_nand_apint2(i117 %A, i117 %B) {
define i8 @demorgan_nor(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nor(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i8 [[NOTC]]
;
%notx = xor i8 %A, -1
@@ -249,7 +249,7 @@ define i8 @demorgan_nor_use2a(i8 %A, i8 %B) {
; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 23
; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A]], [[B_NOT]]
; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2A]]
; CHECK-NEXT: ret i8 [[R]]
;
@@ -267,7 +267,7 @@ define i8 @demorgan_nor_use2b(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nor_use2b(
; CHECK-NEXT: [[USE2B:%.*]] = mul i8 [[B:%.*]], 23
; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2B]]
; CHECK-NEXT: ret i8 [[R]]
;
@@ -284,7 +284,7 @@ define i8 @demorgan_nor_use2b(i8 %A, i8 %B) {
define i8 @demorgan_nor_use2c(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nor_use2c(
; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B:%.*]], [[NOTA]]
; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
@@ -306,7 +306,7 @@ define i8 @demorgan_nor_use2ab(i8 %A, i8 %B) {
; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 17
; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A]], [[B_NOT]]
; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2B]]
; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[R1]], [[USE2A]]
; CHECK-NEXT: ret i8 [[R2]]
@@ -327,7 +327,7 @@ define i8 @demorgan_nor_use2ac(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nor_use2ac(
; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 17
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B:%.*]], [[NOTA]]
; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
@@ -350,7 +350,7 @@ define i8 @demorgan_nor_use2bc(i8 %A, i8 %B) {
; CHECK-LABEL: @demorgan_nor_use2bc(
; CHECK-NEXT: [[USE2B:%.*]] = mul i8 [[B:%.*]], 23
; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B]], [[NOTA]]
; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
diff --git a/llvm/test/Transforms/InstCombine/dependent-ivs.ll b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
index c2cff61ecb388..a6c857b5d395a 100644
--- a/llvm/test/Transforms/InstCombine/dependent-ivs.ll
+++ b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
@@ -452,7 +452,7 @@ define void @int_iv_add_wrong_start(i64 %base, i64 %end) {
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 1, [[ENTRY]] ]
; CHECK-NEXT: call void @use.i64(i64 [[IV2]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 4
-; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV_NEXT]], [[BASE]]
+; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[BASE]], [[IV_NEXT]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[END]]
; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
@@ -675,7 +675,7 @@ define void @different_loops(i64 %base) {
; CHECK: loop2:
; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ [[IV2_NEXT:%.*]], [[LOOP2]] ], [ [[BASE]], [[LOOP1]] ]
; CHECK-NEXT: call void @use.i64(i64 [[IV2]])
-; CHECK-NEXT: [[IV2_NEXT]] = add nuw i64 [[IV_NEXT]], [[BASE]]
+; CHECK-NEXT: [[IV2_NEXT]] = add nuw i64 [[BASE]], [[IV_NEXT]]
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @get.i1()
; CHECK-NEXT: br i1 [[CMP2]], label [[EXIT:%.*]], label [[LOOP2]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll b/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
index 4b9c4fd9f9544..0be7f50cfddae 100644
--- a/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
+++ b/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
@@ -474,8 +474,8 @@ define float @fdiv_fsub_denorm(float %x) {
define float @lerp_commute0(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_commute0(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[A]], [[TMP2]]
; CHECK-NEXT: ret float [[ADD]]
;
%sub = fsub fast float 1.0, %c
@@ -488,8 +488,8 @@ define float @lerp_commute0(float %a, float %b, float %c) {
define <2 x float> @lerp_commute1(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
; CHECK-LABEL: @lerp_commute1(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast <2 x float> [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast <2 x float> [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast <2 x float> [[A]], [[TMP2]]
; CHECK-NEXT: ret <2 x float> [[ADD]]
;
%sub = fsub <2 x float> <float 1.0, float 1.0>, %c
@@ -502,8 +502,8 @@ define <2 x float> @lerp_commute1(<2 x float> %a, <2 x float> %b, <2 x float> %c
define float @lerp_commute2(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_commute2(
; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nsz float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nsz float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc nsz float [[A]], [[TMP2]]
; CHECK-NEXT: ret float [[ADD]]
;
%sub = fsub float 1.0, %c
@@ -516,8 +516,8 @@ define float @lerp_commute2(float %a, float %b, float %c) {
define float @lerp_commute3(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_commute3(
; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc ninf nsz float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc ninf nsz float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc ninf nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc ninf nsz float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc ninf nsz float [[A]], [[TMP2]]
; CHECK-NEXT: ret float [[ADD]]
;
%sub = fsub fast float 1.0, %c
@@ -530,8 +530,8 @@ define float @lerp_commute3(float %a, float %b, float %c) {
define double @lerp_commute4(double %a, double %b, double %c) {
; CHECK-LABEL: @lerp_commute4(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast double [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[A]], [[TMP2]]
; CHECK-NEXT: ret double [[ADD]]
;
%sub = fsub fast double 1.0, %c
@@ -544,8 +544,8 @@ define double @lerp_commute4(double %a, double %b, double %c) {
define double @lerp_commute5(double %a, double %b, double %c) {
; CHECK-LABEL: @lerp_commute5(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast double [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[A]], [[TMP2]]
; CHECK-NEXT: ret double [[ADD]]
;
%sub = fsub fast double 1.0, %c
@@ -558,8 +558,8 @@ define double @lerp_commute5(double %a, double %b, double %c) {
define half @lerp_commute6(half %a, half %b, half %c) {
; CHECK-LABEL: @lerp_commute6(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast half [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[A]], [[TMP2]]
; CHECK-NEXT: ret half [[ADD]]
;
%sub = fsub fast half 1.0, %c
@@ -572,8 +572,8 @@ define half @lerp_commute6(half %a, half %b, half %c) {
define half @lerp_commute7(half %a, half %b, half %c) {
; CHECK-LABEL: @lerp_commute7(
; CHECK-NEXT: [[TMP1:%.*]] = fsub fast half [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[A]], [[TMP2]]
; CHECK-NEXT: ret half [[ADD]]
;
%sub = fsub fast half 1.0, %c
@@ -586,7 +586,7 @@ define half @lerp_commute7(half %a, half %b, half %c) {
define float @lerp_extra_use1(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_extra_use1(
; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
; CHECK-NEXT: call void @use(float [[BC]])
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
@@ -603,7 +603,7 @@ define float @lerp_extra_use1(float %a, float %b, float %c) {
define float @lerp_extra_use2(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_extra_use2(
; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
; CHECK-NEXT: call void @use(float [[MUL]])
; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
@@ -621,7 +621,7 @@ define float @lerp_extra_use3(float %a, float %b, float %c) {
; CHECK-LABEL: @lerp_extra_use3(
; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
; CHECK-NEXT: call void @use(float [[SUB]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
; CHECK-NEXT: ret float [[ADD]]
diff --git a/llvm/test/Transforms/InstCombine/fadd.ll b/llvm/test/Transforms/InstCombine/fadd.ll
index 38508cdb09e1f..840ccaef1086a 100644
--- a/llvm/test/Transforms/InstCombine/fadd.ll
+++ b/llvm/test/Transforms/InstCombine/fadd.ll
@@ -83,7 +83,7 @@ define double @fmul_fneg2(double %x, double %py, double %pz) {
; CHECK-LABEL: @fmul_fneg2(
; CHECK-NEXT: [[Y:%.*]] = frem double -4.200000e+01, [[PY:%.*]]
; CHECK-NEXT: [[Z:%.*]] = frem double 4.200000e+01, [[PZ:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = fsub double [[Z]], [[TMP1]]
; CHECK-NEXT: ret double [[R]]
;
@@ -149,7 +149,7 @@ define double @fmul_fneg2_commute(double %x, double %py, double %pz) {
; CHECK-LABEL: @fmul_fneg2_commute(
; CHECK-NEXT: [[Y:%.*]] = frem double 4.100000e+01, [[PY:%.*]]
; CHECK-NEXT: [[Z:%.*]] = frem double 4.200000e+01, [[PZ:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = fsub double [[Z]], [[TMP1]]
; CHECK-NEXT: ret double [[R]]
;
@@ -207,7 +207,7 @@ define <2 x float> @fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 x fl
; CHECK-LABEL: @fmul_fneg1_extra_use(
; CHECK-NEXT: [[Z:%.*]] = frem <2 x float> <float 4.200000e+01, float -1.000000e+00>, [[PZ:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[Z]], [[MUL]]
; CHECK-NEXT: ret <2 x float> [[R]]
@@ -299,7 +299,7 @@ define float @fmul_fneg2_extra_use2(float %x, float %py, float %z) {
; CHECK-NEXT: [[Y:%.*]] = frem float -4.200000e+01, [[PY:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[X]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[R]]
;
@@ -355,7 +355,7 @@ define <2 x float> @fmul_fneg1_extra_use3(<2 x float> %x, <2 x float> %y, <2 x f
; CHECK-LABEL: @fmul_fneg1_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[MUL]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x float> [[R]]
diff --git a/llvm/test/Transforms/InstCombine/fast-basictest.ll b/llvm/test/Transforms/InstCombine/fast-basictest.ll
index 3c7776a43e55e..62fa123b7cf00 100644
--- a/llvm/test/Transforms/InstCombine/fast-basictest.ll
+++ b/llvm/test/Transforms/InstCombine/fast-basictest.ll
@@ -424,7 +424,7 @@ define float @test14_reassoc(float %arg) {
define float @test15(float %b, float %a) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
; CHECK-NEXT: ret float [[TMP3]]
;
@@ -438,7 +438,7 @@ define float @test15(float %b, float %a) {
define float @test15_unary_fneg(float %b, float %a) {
; CHECK-LABEL: @test15_unary_fneg(
; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
; CHECK-NEXT: ret float [[TMP3]]
;
@@ -452,7 +452,7 @@ define float @test15_unary_fneg(float %b, float %a) {
define float @test15_reassoc_nsz(float %b, float %a) {
; CHECK-LABEL: @test15_reassoc_nsz(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc nsz float [[TMP2]], [[A]]
; CHECK-NEXT: ret float [[TMP3]]
;
@@ -466,7 +466,7 @@ define float @test15_reassoc_nsz(float %b, float %a) {
define float @test15_reassoc(float %b, float %a) {
; CHECK-LABEL: @test15_reassoc(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret float [[TMP4]]
@@ -549,8 +549,9 @@ define float @test16_reassoc(float %a, float %b, float %z) {
define float @test17(float %a, float %b, float %z) {
; CHECK-LABEL: @test17(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[Z:%.*]], -4.000000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[C]], [[TMP1]]
; CHECK-NEXT: ret float [[F]]
;
%d = fmul fast float %z, 4.000000e+01
@@ -562,8 +563,9 @@ define float @test17(float %a, float %b, float %z) {
define float @test17_unary_fneg(float %a, float %b, float %z) {
; CHECK-LABEL: @test17_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[Z:%.*]], -4.000000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[C]], [[TMP1]]
; CHECK-NEXT: ret float [[F]]
;
%d = fmul fast float %z, 4.000000e+01
@@ -575,8 +577,9 @@ define float @test17_unary_fneg(float %a, float %b, float %z) {
define float @test17_reassoc_nsz(float %a, float %b, float %z) {
; CHECK-LABEL: @test17_reassoc_nsz(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul reassoc nsz float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc nsz float [[Z:%.*]], -4.000000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc nsz float [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc nsz float [[C]], [[TMP1]]
; CHECK-NEXT: ret float [[F]]
;
%d = fmul reassoc nsz float %z, 4.000000e+01
@@ -591,7 +594,7 @@ define float @test17_reassoc(float %a, float %b, float %z) {
; CHECK-LABEL: @test17_reassoc(
; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[D]]
-; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[A:%.*]], [[C]]
; CHECK-NEXT: [[F:%.*]] = fsub reassoc float 0.000000e+00, [[E]]
; CHECK-NEXT: ret float [[F]]
;
@@ -606,8 +609,9 @@ define float @test17_reassoc(float %a, float %b, float %z) {
define float @test17_unary_fneg_no_FMF(float %a, float %b, float %z) {
; CHECK-LABEL: @test17_unary_fneg_no_FMF(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul float [[Z:%.*]], -4.000000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fneg float [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul float [[C]], [[TMP1]]
; CHECK-NEXT: ret float [[F]]
;
%d = fmul float %z, 4.000000e+01
@@ -619,8 +623,9 @@ define float @test17_unary_fneg_no_FMF(float %a, float %b, float %z) {
define float @test17_reassoc_unary_fneg(float %a, float %b, float %z) {
; CHECK-LABEL: @test17_reassoc_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[Z:%.*]], -4.000000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc float [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[C]], [[TMP1]]
; CHECK-NEXT: ret float [[F]]
;
%d = fmul reassoc float %z, 4.000000e+01
diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll
index da403555ebe24..2f10b7fa06e7a 100644
--- a/llvm/test/Transforms/InstCombine/fast-math.ll
+++ b/llvm/test/Transforms/InstCombine/fast-math.ll
@@ -65,7 +65,7 @@ define double @fold3_reassoc_nsz(double %f1) {
define double @fold3_reassoc(double %f1) {
; CHECK-LABEL: @fold3_reassoc(
; CHECK-NEXT: [[T1:%.*]] = fmul reassoc double [[F1:%.*]], 5.000000e+00
-; CHECK-NEXT: [[T2:%.*]] = fadd reassoc double [[T1]], [[F1]]
+; CHECK-NEXT: [[T2:%.*]] = fadd reassoc double [[F1]], [[T1]]
; CHECK-NEXT: ret double [[T2]]
;
%t1 = fmul reassoc double 5.000000e+00, %f1
@@ -175,7 +175,7 @@ define float @fold6_reassoc_nsz(float %f1) {
define float @fold6_reassoc(float %f1) {
; CHECK-LABEL: @fold6_reassoc(
; CHECK-NEXT: [[T1:%.*]] = fadd reassoc float [[F1:%.*]], [[F1]]
-; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[T1]], [[F1]]
+; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[F1]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = fadd reassoc float [[T2]], [[F1]]
; CHECK-NEXT: ret float [[T3]]
;
@@ -506,7 +506,7 @@ define float @fold16(float %x, float %y) {
; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = fneg float [[Y]]
; CHECK-NEXT: [[R_P:%.*]] = select i1 [[CMP]], float [[Y]], float [[TMP1]]
-; CHECK-NEXT: [[R:%.*]] = fadd float [[R_P]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = fadd float [[X]], [[R_P]]
; CHECK-NEXT: ret float [[R]]
;
%cmp = fcmp ogt float %x, %y
@@ -711,9 +711,9 @@ define double @sqrt_intrinsic_three_args5(double %x, double %y) {
define double @sqrt_intrinsic_three_args6(double %x, double %y) {
; CHECK-LABEL: @sqrt_intrinsic_three_args6(
-; CHECK-NEXT: [[FABS:%.*]] = call fast double @llvm.fabs.f64(double [[X:%.*]])
-; CHECK-NEXT: [[SQRT1:%.*]] = call fast double @llvm.sqrt.f64(double [[Y:%.*]])
-; CHECK-NEXT: [[SQRT:%.*]] = fmul fast double [[FABS]], [[SQRT1]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[X:%.*]], [[X]]
+; CHECK-NEXT: [[MUL2:%.*]] = fmul fast double [[Y:%.*]], [[MUL]]
+; CHECK-NEXT: [[SQRT:%.*]] = call fast double @llvm.sqrt.f64(double [[MUL2]])
; CHECK-NEXT: ret double [[SQRT]]
;
%mul = fmul fast double %x, %x
diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll
index 4d907800219d6..329020a752f5d 100644
--- a/llvm/test/Transforms/InstCombine/fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/fcmp.ll
@@ -1289,7 +1289,7 @@ define <1 x i1> @bitcast_1vec_eq0(i32 %x) {
define i1 @fcmp_fadd_zero_ugt(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ugt(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ugt float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ugt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1299,7 +1299,7 @@ define i1 @fcmp_fadd_zero_ugt(float %x, float %y) {
define i1 @fcmp_fadd_zero_uge(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_uge(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp uge float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp uge float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1309,7 +1309,7 @@ define i1 @fcmp_fadd_zero_uge(float %x, float %y) {
define i1 @fcmp_fadd_zero_ogt(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ogt(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1319,7 +1319,7 @@ define i1 @fcmp_fadd_zero_ogt(float %x, float %y) {
define i1 @fcmp_fadd_zero_oge(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_oge(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oge float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oge float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1329,7 +1329,7 @@ define i1 @fcmp_fadd_zero_oge(float %x, float %y) {
define i1 @fcmp_fadd_zero_ult(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ult(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ult float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ult float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1339,7 +1339,7 @@ define i1 @fcmp_fadd_zero_ult(float %x, float %y) {
define i1 @fcmp_fadd_zero_ule(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ule(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1349,7 +1349,7 @@ define i1 @fcmp_fadd_zero_ule(float %x, float %y) {
define i1 @fcmp_fadd_zero_olt(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_olt(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1359,7 +1359,7 @@ define i1 @fcmp_fadd_zero_olt(float %x, float %y) {
define i1 @fcmp_fadd_zero_ole(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ole(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ole float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ole float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1369,7 +1369,7 @@ define i1 @fcmp_fadd_zero_ole(float %x, float %y) {
define i1 @fcmp_fadd_zero_oeq(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_oeq(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1379,7 +1379,7 @@ define i1 @fcmp_fadd_zero_oeq(float %x, float %y) {
define i1 @fcmp_fadd_zero_one(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_one(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp one float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp one float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1389,7 +1389,7 @@ define i1 @fcmp_fadd_zero_one(float %x, float %y) {
define i1 @fcmp_fadd_zero_ueq(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ueq(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ueq float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1399,7 +1399,7 @@ define i1 @fcmp_fadd_zero_ueq(float %x, float %y) {
define i1 @fcmp_fadd_zero_une(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_une(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp une float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp une float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1409,7 +1409,7 @@ define i1 @fcmp_fadd_zero_une(float %x, float %y) {
define i1 @fcmp_fadd_zero_ord(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_ord(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ord float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ord float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1419,7 +1419,7 @@ define i1 @fcmp_fadd_zero_ord(float %x, float %y) {
define i1 @fcmp_fadd_zero_uno(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_uno(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp uno float [[ADD:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp uno float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %x, 0.000000e+00
@@ -1439,7 +1439,7 @@ define i1 @fcmp_fadd_neg_zero(float %x, float %y) {
define i1 @fcmp_fadd_zero_switched(float %x, float %y) {
; CHECK-LABEL: @fcmp_fadd_zero_switched(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ult float [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ugt float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add = fadd float %y, 0.000000e+00
diff --git a/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll b/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
index 9f030c5ebf7bb..c5078ff1efc5a 100644
--- a/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
+++ b/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
@@ -8,7 +8,7 @@ define double @sqrt_div_fast(double %x, double %y, double %z) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = fdiv fast double [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul fast double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul fast double [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[DIV1]]
;
entry:
@@ -38,7 +38,7 @@ define double @sqrt_div_reassoc_arcp(double %x, double %y, double %z) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = fdiv reassoc arcp double [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call reassoc arcp double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[DIV1]]
;
entry:
@@ -98,7 +98,7 @@ define double @sqrt_div_arcp_missing(double %x, double %y, double %z) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = fdiv reassoc double [[Z:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = call reassoc arcp double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[DIV1]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/fdiv.ll b/llvm/test/Transforms/InstCombine/fdiv.ll
index ca11685c98417..12d6e6463de65 100644
--- a/llvm/test/Transforms/InstCombine/fdiv.ll
+++ b/llvm/test/Transforms/InstCombine/fdiv.ll
@@ -678,7 +678,7 @@ define float @pow_divisor(float %x, float %y, float %z) {
; CHECK-LABEL: @pow_divisor(
; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.pow.f32(float [[X:%.*]], float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
%p = call float @llvm.pow.f32(float %x, float %y)
@@ -744,7 +744,7 @@ define float @exp_divisor(float %y, float %z) {
; CHECK-LABEL: @exp_divisor(
; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.exp.f32(float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
%p = call float @llvm.exp.f32(float %y)
@@ -810,7 +810,7 @@ define float @exp2_divisor(float %y, float %z) {
; CHECK-LABEL: @exp2_divisor(
; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.exp2.f32(float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
%p = call float @llvm.exp2.f32(float %y)
@@ -876,7 +876,7 @@ define float @powi_divisor(float %x, i32 %y, float %z) {
; CHECK-LABEL: @powi_divisor(
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[Y:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call reassoc ninf arcp float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc ninf arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc ninf arcp float [[Z:%.*]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
%p = call float @llvm.powi.f32.i32(float %x, i32 %y)
diff --git a/llvm/test/Transforms/InstCombine/float-shrink-compare.ll b/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
index e6e41ad03ce59..77b6ed7c5abe8 100644
--- a/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
+++ b/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
@@ -215,7 +215,7 @@ define i1 @test7_intrin(float %x, float %y) {
define i1 @test8(float %x, float %y) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -228,7 +228,7 @@ define i1 @test8(float %x, float %y) {
define i1 @test8_intrin(float %x, float %y) {
; CHECK-LABEL: @test8_intrin(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -241,7 +241,7 @@ define i1 @test8_intrin(float %x, float %y) {
define i1 @test9(float %x, float %y) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -254,7 +254,7 @@ define i1 @test9(float %x, float %y) {
define i1 @test9_intrin(float %x, float %y) {
; CHECK-LABEL: @test9_intrin(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -319,7 +319,7 @@ define i1 @test11_intrin(float %x, float %y) {
define i1 @test12(float %x, float %y) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.rint.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -332,7 +332,7 @@ define i1 @test12(float %x, float %y) {
define i1 @test13(float %x, float %y) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -345,7 +345,7 @@ define i1 @test13(float %x, float %y) {
define i1 @test13_intrin(float %x, float %y) {
; CHECK-LABEL: @test13_intrin(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -358,7 +358,7 @@ define i1 @test13_intrin(float %x, float %y) {
define i1 @test13a(float %x, float %y) {
; CHECK-LABEL: @test13a(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -371,7 +371,7 @@ define i1 @test13a(float %x, float %y) {
define i1 @test13a_intrin(float %x, float %y) {
; CHECK-LABEL: @test13a_intrin(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -384,7 +384,7 @@ define i1 @test13a_intrin(float %x, float %y) {
define i1 @test14(float %x, float %y) {
; CHECK-LABEL: @test14(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -397,7 +397,7 @@ define i1 @test14(float %x, float %y) {
define i1 @test14_intrin(float %x, float %y) {
; CHECK-LABEL: @test14_intrin(
; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x.ext = fpext float %x to double
@@ -424,7 +424,7 @@ define i1 @test15(float %x, float %y, float %z) {
define i1 @test16(float %x, float %y, float %z) {
; CHECK-LABEL: @test16(
; CHECK-NEXT: [[FMINF:%.*]] = call nsz float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMINF]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Z:%.*]], [[FMINF]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = fpext float %z to double
@@ -452,7 +452,7 @@ define i1 @test17(float %x, float %y, float %z) {
define i1 @test18(float %x, float %y, float %z) {
; CHECK-LABEL: @test18(
; CHECK-NEXT: [[FMAXF:%.*]] = call nsz float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMAXF]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Z:%.*]], [[FMAXF]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = fpext float %z to double
@@ -480,7 +480,7 @@ define i1 @test19(float %x, float %y, float %z) {
define i1 @test20(float %x, float %y) {
; CHECK-LABEL: @test20(
; CHECK-NEXT: [[FMINF:%.*]] = call nsz float @llvm.minnum.f32(float [[X:%.*]], float 1.000000e+00)
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMINF]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Y:%.*]], [[FMINF]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = fpext float %y to double
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 1526956c5b241..9ad8a3161b4ca 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -281,7 +281,7 @@ define float @neg_unary_neg_multi_use(float %x, float %y) {
define float @neg_mul(float %x, float %y) {
; CHECK-LABEL: @neg_mul(
; CHECK-NEXT: [[SUB:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB]]
; CHECK-NEXT: ret float [[MUL]]
;
%sub = fsub float -0.0, %x
@@ -292,7 +292,7 @@ define float @neg_mul(float %x, float %y) {
define float @unary_neg_mul(float %x, float %y) {
; CHECK-LABEL: @unary_neg_mul(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]]
; CHECK-NEXT: ret float [[MUL]]
;
%neg = fneg float %x
@@ -303,7 +303,7 @@ define float @unary_neg_mul(float %x, float %y) {
define <2 x float> @neg_mul_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @neg_mul_vec(
; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
; CHECK-NEXT: ret <2 x float> [[MUL]]
;
%sub = fsub <2 x float> <float -0.0, float -0.0>, %x
@@ -314,7 +314,7 @@ define <2 x float> @neg_mul_vec(<2 x float> %x, <2 x float> %y) {
define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @unary_neg_mul_vec(
; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
; CHECK-NEXT: ret <2 x float> [[MUL]]
;
%sub = fneg <2 x float> %x
@@ -325,7 +325,7 @@ define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) {
define <2 x float> @neg_mul_vec_poison(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @neg_mul_vec_poison(
; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
; CHECK-NEXT: ret <2 x float> [[MUL]]
;
%sub = fsub <2 x float> <float poison, float -0.0>, %x
@@ -337,7 +337,7 @@ define <2 x float> @neg_mul_vec_poison(<2 x float> %x, <2 x float> %y) {
define float @neg_sink_nsz(float %x, float %y) {
; CHECK-LABEL: @neg_sink_nsz(
; CHECK-NEXT: [[SUB1:%.*]] = fneg nsz float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
; CHECK-NEXT: ret float [[MUL]]
;
%sub1 = fsub nsz float 0.0, %x
@@ -348,7 +348,7 @@ define float @neg_sink_nsz(float %x, float %y) {
define float @neg_sink_multi_use(float %x, float %y) {
; CHECK-LABEL: @neg_sink_multi_use(
; CHECK-NEXT: [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
; CHECK-NEXT: [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
; CHECK-NEXT: ret float [[MUL2]]
;
@@ -361,7 +361,7 @@ define float @neg_sink_multi_use(float %x, float %y) {
define float @unary_neg_mul_multi_use(float %x, float %y) {
; CHECK-LABEL: @unary_neg_mul_multi_use(
; CHECK-NEXT: [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
; CHECK-NEXT: [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
; CHECK-NEXT: ret float [[MUL2]]
;
@@ -449,7 +449,7 @@ declare double @llvm.sqrt.f64(double)
define double @sqrt_squared2(double %f) {
; CHECK-LABEL: @sqrt_squared2(
; CHECK-NEXT: [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[F:%.*]])
-; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[SQRT]], [[F]]
+; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[F]], [[SQRT]]
; CHECK-NEXT: ret double [[MUL2]]
;
%sqrt = call double @llvm.sqrt.f64(double %f)
@@ -1132,7 +1132,7 @@ for.body:
define double @fmul_negated_constant_expression(double %x) {
; CHECK-LABEL: @fmul_negated_constant_expression(
; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 1, i32 0, i64 0) to i64) to double)
-; CHECK-NEXT: [[R:%.*]] = fmul double [[FSUB]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul double [[X:%.*]], [[FSUB]]
; CHECK-NEXT: ret double [[R]]
;
%fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, i32 0, i64 2) to i64) to double)
diff --git a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
index 1fd570bf2635b..d16f36927d71a 100644
--- a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
+++ b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
@@ -92,7 +92,7 @@ define i32 @t5(i32 %x, i32 %y) {
define i32 @t6(i32 %x, i32 %y) {
; CHECK-LABEL: @t6(
; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
; CHECK-NEXT: ret i32 [[T2]]
@@ -108,7 +108,7 @@ define i32 @t7(i32 %x, i32 %y) {
; CHECK-LABEL: @t7(
; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
; CHECK-NEXT: ret i32 [[T2]]
@@ -202,7 +202,7 @@ define i32 @n11(i32 %x, i32 %y) {
define i32 @n12(i32 %x, i32 %y) {
; CHECK-LABEL: @n12(
; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 2
; CHECK-NEXT: ret i32 [[T2]]
;
diff --git a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
index dedd12f8cc7a3..1c28b151825c1 100644
--- a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
@@ -428,7 +428,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -442,7 +442,7 @@ define float @fmul_by_fabs_var_if_0_oeq_zero_f32(float %x, float %y) {
; CHECK-NEXT: [[Y_FABS:%.*]] = call float @llvm.fabs.f32(float [[Y:%.*]])
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y_FABS]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%y.fabs = call float @llvm.fabs.f32(float %y)
@@ -468,7 +468,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -482,7 +482,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul ninf nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul ninf nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -496,7 +496,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -510,7 +510,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -524,7 +524,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_ninf_select(float %x, float
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_ninf_select(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf nsz i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -559,7 +559,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -572,7 +572,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -586,7 +586,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -599,7 +599,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -623,7 +623,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero(flo
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -636,7 +636,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsu
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsub(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -693,7 +693,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -706,7 +706,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
@@ -719,7 +719,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32_select_nnan_ninf_nsz(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_select_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf nsz i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
%x.is.zero = fcmp oeq float %x, 0.0
diff --git a/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll b/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll
index f5024664f58c3..a5c7cb3306ed0 100644
--- a/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll
+++ b/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll
@@ -124,7 +124,7 @@ define i1 @pow2_or_zero_is_negative_extra_use(i8 %x) {
; CHECK-LABEL: @pow2_or_zero_is_negative_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: call void @use(i8 [[NEG]])
-; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = and i8 [[NEG]], [[X]]
+; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = and i8 [[X]], [[NEG]]
; CHECK-NEXT: call void @use(i8 [[POW2_OR_ZERO]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[X]], -128
; CHECK-NEXT: ret i1 [[CMP]]
diff --git a/llvm/test/Transforms/InstCombine/fpextend.ll b/llvm/test/Transforms/InstCombine/fpextend.ll
index 19f512d717a97..c9adbe10d8db4 100644
--- a/llvm/test/Transforms/InstCombine/fpextend.ll
+++ b/llvm/test/Transforms/InstCombine/fpextend.ll
@@ -142,7 +142,7 @@ define float @test9(half %x, half %y) nounwind {
define float @test10(half %x, float %y) nounwind {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[TMP1:%.*]] = fpext half [[X:%.*]] to float
-; CHECK-NEXT: [[T56:%.*]] = fmul float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[T56:%.*]] = fmul float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[T56]]
;
%t1 = fpext half %x to double
@@ -167,7 +167,7 @@ define float @test11(half %x) nounwind {
define float @test12(float %x, half %y) nounwind {
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[TMP1:%.*]] = fpext half [[Y:%.*]] to float
-; CHECK-NEXT: [[T34:%.*]] = fadd float [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T34:%.*]] = fadd float [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[T34]]
;
%t1 = fpext float %x to double
@@ -440,8 +440,8 @@ define half @bf16_to_f32_to_f16(bfloat %a) nounwind {
define bfloat @bf16_frem(bfloat %x) {
; CHECK-LABEL: @bf16_frem(
-; CHECK-NEXT: [[FREM:%.*]] = frem bfloat [[X:%.*]], 0xR40C9
-; CHECK-NEXT: ret bfloat [[FREM]]
+; CHECK-NEXT: [[TMP1:%.*]] = frem bfloat [[X:%.*]], 0xR40C9
+; CHECK-NEXT: ret bfloat [[TMP1]]
;
%t1 = fpext bfloat %x to float
%t2 = frem float %t1, 6.281250e+00
diff --git a/llvm/test/Transforms/InstCombine/fptrunc.ll b/llvm/test/Transforms/InstCombine/fptrunc.ll
index c78df0b83d9cd..825868b107033 100644
--- a/llvm/test/Transforms/InstCombine/fptrunc.ll
+++ b/llvm/test/Transforms/InstCombine/fptrunc.ll
@@ -4,7 +4,7 @@
define float @fadd_fpext_op0(float %x, double %y) {
; CHECK-LABEL: @fadd_fpext_op0(
; CHECK-NEXT: [[EXT:%.*]] = fpext float [[X:%.*]] to double
-; CHECK-NEXT: [[BO:%.*]] = fadd reassoc double [[EXT]], [[Y:%.*]]
+; CHECK-NEXT: [[BO:%.*]] = fadd reassoc double [[Y:%.*]], [[EXT]]
; CHECK-NEXT: [[R:%.*]] = fptrunc double [[BO]] to float
; CHECK-NEXT: ret float [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/free-inversion.ll b/llvm/test/Transforms/InstCombine/free-inversion.ll
index a89887a586b58..ebb9310ee0a78 100644
--- a/llvm/test/Transforms/InstCombine/free-inversion.ll
+++ b/llvm/test/Transforms/InstCombine/free-inversion.ll
@@ -30,7 +30,7 @@ define i8 @xor_2(i8 %a, i1 %c, i8 %x, i8 %y) {
; CHECK-LABEL: @xor_2(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
-; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[A:%.*]], [[TMP2]]
; CHECK-NEXT: ret i8 [[NOT_AB]]
;
%nx = xor i8 %x, -1
@@ -45,7 +45,7 @@ define i8 @xor_fail(i8 %a, i1 %c, i8 %x, i8 %y) {
; CHECK-LABEL: @xor_fail(
; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], i8 [[NX]], i8 [[Y:%.*]]
-; CHECK-NEXT: [[AB:%.*]] = xor i8 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[AB:%.*]] = xor i8 [[A:%.*]], [[B]]
; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[AB]], -1
; CHECK-NEXT: ret i8 [[NOT_AB]]
;
@@ -91,7 +91,7 @@ define i8 @add_fail(i8 %a, i1 %c, i8 %x, i8 %y) {
; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], [[A:%.*]]
; CHECK-NEXT: [[YY:%.*]] = xor i8 [[Y:%.*]], 123
; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], i8 [[NX]], i8 [[YY]]
-; CHECK-NEXT: [[AB:%.*]] = add i8 [[B]], [[A]]
+; CHECK-NEXT: [[AB:%.*]] = add i8 [[A]], [[B]]
; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[AB]], -1
; CHECK-NEXT: ret i8 [[NOT_AB]]
;
@@ -605,7 +605,7 @@ define i32 @test_inv_free_i32(i1 %c1, i1 %c2, i32 %c3, i32 %c4) {
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[B1]] ], [ -1, [[B2]] ], [ [[C3:%.*]], [[B3]] ]
-; CHECK-NEXT: [[COND:%.*]] = xor i32 [[TMP0]], [[C4:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = xor i32 [[C4:%.*]], [[TMP0]]
; CHECK-NEXT: ret i32 [[COND]]
;
entry:
@@ -682,7 +682,7 @@ define i32 @test_inv_free_i32_newinst(i1 %c1, i1 %c2, i32 %c3, i32 %c4) {
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ -1, [[B1]] ], [ 0, [[B2]] ], [ [[ASHR]], [[B3]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[VAL]], [[C4:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[C4:%.*]], [[VAL]]
; CHECK-NEXT: [[COND:%.*]] = xor i32 [[TMP0]], -1
; CHECK-NEXT: ret i32 [[COND]]
;
diff --git a/llvm/test/Transforms/InstCombine/fsh.ll b/llvm/test/Transforms/InstCombine/fsh.ll
index 505a228367254..f1fba6cb272f9 100644
--- a/llvm/test/Transforms/InstCombine/fsh.ll
+++ b/llvm/test/Transforms/InstCombine/fsh.ll
@@ -725,7 +725,7 @@ define i32 @fsh_orconst_rotate(i32 %a) {
define i32 @fsh_rotate_5(i8 %x, i32 %y) {
; CHECK-LABEL: @fsh_rotate_5(
; CHECK-NEXT: [[T1:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[T1]]
; CHECK-NEXT: [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[OR1]], i32 5)
; CHECK-NEXT: ret i32 [[OR2]]
;
@@ -741,7 +741,7 @@ define i32 @fsh_rotate_5(i8 %x, i32 %y) {
define i32 @fsh_rotate_18(i8 %x, i32 %y) {
; CHECK-LABEL: @fsh_rotate_18(
; CHECK-NEXT: [[T1:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[T1]]
; CHECK-NEXT: [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[OR1]], i32 18)
; CHECK-NEXT: ret i32 [[OR2]]
;
diff --git a/llvm/test/Transforms/InstCombine/fsub.ll b/llvm/test/Transforms/InstCombine/fsub.ll
index f1e7086e697e8..cffc63405ddcb 100644
--- a/llvm/test/Transforms/InstCombine/fsub.ll
+++ b/llvm/test/Transforms/InstCombine/fsub.ll
@@ -86,7 +86,7 @@ define float @unary_neg_sub_nsz_extra_use(float %x, float %y) {
define float @sub_sub_nsz(float %x, float %y, float %z) {
; CHECK-LABEL: @sub_sub_nsz(
; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
@@ -219,7 +219,7 @@ define <2 x float> @neg_op1_vec_poison(<2 x float> %x, <2 x float> %y) {
define double @neg_ext_op1(float %a, double %b) {
; CHECK-LABEL: @neg_ext_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
-; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd double [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fsub float -0.0, %a
@@ -231,7 +231,7 @@ define double @neg_ext_op1(float %a, double %b) {
define double @unary_neg_ext_op1(float %a, double %b) {
; CHECK-LABEL: @unary_neg_ext_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
-; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd double [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fneg float %a
@@ -245,7 +245,7 @@ define double @unary_neg_ext_op1(float %a, double %b) {
define <2 x float> @neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
; CHECK-LABEL: @neg_trunc_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float>
-; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x float> [[T3]]
;
%t1 = fsub <2 x double> <double -0.0, double -0.0>, %a
@@ -257,7 +257,7 @@ define <2 x float> @neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
define <2 x float> @unary_neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
; CHECK-LABEL: @unary_neg_trunc_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float>
-; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x float> [[T3]]
;
%t1 = fneg <2 x double> %a
@@ -271,7 +271,7 @@ define <2 x float> @unary_neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
define double @neg_ext_op1_fast(float %a, double %b) {
; CHECK-LABEL: @neg_ext_op1_fast(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
-; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fsub float -0.0, %a
@@ -283,7 +283,7 @@ define double @neg_ext_op1_fast(float %a, double %b) {
define double @unary_neg_ext_op1_fast(float %a, double %b) {
; CHECK-LABEL: @unary_neg_ext_op1_fast(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
-; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fneg float %a
@@ -332,7 +332,7 @@ define float @neg_trunc_op1_extra_use(double %a, float %b) {
; CHECK-LABEL: @neg_trunc_op1_extra_use(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float
; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]]
-; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
@@ -347,7 +347,7 @@ define float @unary_neg_trunc_op1_extra_use(double %a, float %b) {
; CHECK-LABEL: @unary_neg_trunc_op1_extra_use(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float
; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]]
-; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = fadd float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
@@ -407,7 +407,7 @@ define float @PR37605(float %conv) {
define double @fsub_fdiv_fneg1(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fdiv_fneg1(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv double [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
@@ -419,7 +419,7 @@ define double @fsub_fdiv_fneg1(double %x, double %y, double %z) {
define <2 x double> @fsub_fdiv_fneg2(<2 x double> %x, <2 x double> %y, <2 x double> %z) {
; CHECK-LABEL: @fsub_fdiv_fneg2(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <2 x double> [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd <2 x double> [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd <2 x double> [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x double> [[R]]
;
%neg = fsub <2 x double> <double -0.0, double -0.0>, %x
@@ -431,7 +431,7 @@ define <2 x double> @fsub_fdiv_fneg2(<2 x double> %x, <2 x double> %y, <2 x doub
define double @fsub_fmul_fneg1(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fmul_fneg1(
; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
@@ -443,7 +443,7 @@ define double @fsub_fmul_fneg1(double %x, double %y, double %z) {
define double @fsub_fmul_fneg2(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fmul_fneg2(
; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
@@ -487,7 +487,7 @@ declare void @use_vec(<2 x float>)
define <2 x float> @fsub_fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @fsub_fmul_fneg1_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret <2 x float> [[R]]
@@ -502,7 +502,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2
define float @fsub_fmul_fneg2_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fmul_fneg2_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use(float [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret float [[R]]
@@ -519,7 +519,7 @@ define float @fsub_fdiv_fneg1_extra_use2(float %x, float %y, float %z) {
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
@@ -534,7 +534,7 @@ define float @fsub_fdiv_fneg2_extra_use2(float %x, float %y, float %z) {
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[Y:%.*]], [[X]]
-; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
@@ -549,7 +549,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use2(<2 x float> %x, <2 x float> %y, <
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
@@ -564,7 +564,7 @@ define float @fsub_fmul_fneg2_extra_use2(float %x, float %y, float %z) {
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
@@ -612,7 +612,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use3(<2 x float> %x, <2 x float> %y, <
; CHECK-LABEL: @fsub_fmul_fneg1_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret <2 x float> [[R]]
@@ -629,7 +629,7 @@ define float @fsub_fmul_fneg2_extra_use3(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fmul_fneg2_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use(float [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret float [[R]]
@@ -805,7 +805,7 @@ define float @fsub_fadd_fsub_reassoc(float %w, float %x, float %y, float %z) {
define <2 x float> @fsub_fadd_fsub_reassoc_commute(<2 x float> %w, <2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @fsub_fadd_fsub_reassoc_commute(
; CHECK-NEXT: [[D:%.*]] = fdiv <2 x float> [[Y:%.*]], <float 4.200000e+01, float -4.200000e+01>
-; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x float> [[D]], [[W:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x float> [[W:%.*]], [[D]]
; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <2 x float> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[S2:%.*]] = fsub fast <2 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <2 x float> [[S2]]
@@ -823,7 +823,7 @@ define float @fsub_fadd_fsub_reassoc_twice(float %v, float %w, float %x, float %
; CHECK-LABEL: @fsub_fadd_fsub_reassoc_twice(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[W:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[X:%.*]], [[V:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = fadd reassoc nsz float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fadd reassoc nsz float [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[S3:%.*]] = fsub reassoc nsz float [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret float [[S3]]
;
diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll
index a54e6e4642b75..fa0d59b226998 100644
--- a/llvm/test/Transforms/InstCombine/funnel.ll
+++ b/llvm/test/Transforms/InstCombine/funnel.ll
@@ -464,10 +464,10 @@ define i32 @fshl_concat_i8_i8_different_slot(i8 %x, i8 %y, ptr %addr) {
define i32 @fshl_concat_unknown_source(i32 %zext.x, i32 %zext.y, ptr %addr) {
; CHECK-LABEL: @fshl_concat_unknown_source(
; CHECK-NEXT: [[SLX:%.*]] = shl i32 [[ZEXT_X:%.*]], 16
-; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y:%.*]]
+; CHECK-NEXT: [[XY:%.*]] = or i32 [[ZEXT_Y:%.*]], [[SLX]]
; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4
; CHECK-NEXT: [[SLY:%.*]] = shl i32 [[ZEXT_Y]], 16
-; CHECK-NEXT: [[YX:%.*]] = or i32 [[SLY]], [[ZEXT_X]]
+; CHECK-NEXT: [[YX:%.*]] = or i32 [[ZEXT_X]], [[SLY]]
; CHECK-NEXT: ret i32 [[YX]]
;
%slx = shl i32 %zext.x, 16
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 04b0c196ab510..8d1bb458ba3f8 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -267,8 +267,8 @@ define <2 x i1> @test13_fixed_scalable(i64 %X, ptr %P, <2 x i64> %y) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT2]], [[Y:%.*]]
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[Y:%.*]], [[DOTSPLAT]]
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], [[B_IDX]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
@@ -287,7 +287,7 @@ define <vscale x 2 x i1> @test13_scalable_scalable(i64 %X, ptr %P, <vscale x 2 x
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP2]], i64 0
; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <vscale x 2 x i64> [[DOTSPLAT2]], [[Y:%.*]]
+; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <vscale x 2 x i64> [[Y:%.*]], [[DOTSPLAT2]]
; CHECK-NEXT: [[C:%.*]] = icmp eq <vscale x 2 x i64> [[A_IDX]], [[B_IDX]]
; CHECK-NEXT: ret <vscale x 2 x i1> [[C]]
;
diff --git a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll
index e4cae13519783..6049997db4d1a 100644
--- a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll
+++ b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll
@@ -15,7 +15,7 @@
define i8 @t0(i8 %x, i8 %y) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 0, [[TMP2]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
;
@@ -45,7 +45,7 @@ define i8 @t1_commutative(i8 %y) {
define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t2_vec(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP2]]
; CHECK-NEXT: ret <2 x i8> [[NEGBIAS]]
;
@@ -58,7 +58,7 @@ define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) {
define <2 x i8> @t3_vec_poison(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t3_vec_poison(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP2]]
; CHECK-NEXT: ret <2 x i8> [[NEGBIAS]]
;
@@ -76,7 +76,7 @@ define i8 @n4_extrause0(i8 %x, i8 %y) {
; CHECK-LABEL: @n4_extrause0(
; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[NEGY]])
-; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]]
+; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]]
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
;
@@ -89,7 +89,7 @@ define i8 @n4_extrause0(i8 %x, i8 %y) {
define i8 @n5_extrause1(i8 %x, i8 %y) {
; CHECK-LABEL: @n5_extrause1(
; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]]
+; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]]
; CHECK-NEXT: call void @use8(i8 [[UNBIASEDX]])
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
@@ -104,7 +104,7 @@ define i8 @n6_extrause2(i8 %x, i8 %y) {
; CHECK-LABEL: @n6_extrause2(
; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[NEGY]])
-; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]]
+; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]]
; CHECK-NEXT: call void @use8(i8 [[UNBIASEDX]])
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
@@ -122,7 +122,7 @@ define i8 @n6_extrause2(i8 %x, i8 %y) {
define i8 @n7(i8 %x, i8 %y) {
; CHECK-LABEL: @n7(
; CHECK-NEXT: [[NEGY_NOT:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[NEGBIAS:%.*]] = and i8 [[NEGY_NOT]], [[X:%.*]]
+; CHECK-NEXT: [[NEGBIAS:%.*]] = and i8 [[X:%.*]], [[NEGY_NOT]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
;
%negy = sub i8 0, %y
@@ -147,7 +147,7 @@ define i8 @n8(i8 %x, i8 %y) {
define i8 @n9(i8 %x0, i8 %x1, i8 %y) {
; CHECK-LABEL: @n9(
; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X1:%.*]]
+; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X1:%.*]], [[NEGY]]
; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X0:%.*]]
; CHECK-NEXT: ret i8 [[NEGBIAS]]
;
diff --git a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
index db2c8e2f22f6e..87328f884ae33 100644
--- a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
+++ b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
@@ -64,7 +64,7 @@ define i8 @t4_extrause(i8 %x, i8 %y) {
define i8 @t5_commutativity(i8 %x) {
; CHECK-LABEL: @t5_commutativity(
; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = xor i8 [[TMP1]], 42
; CHECK-NEXT: ret i8 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index 6b4e5a5372c52..80ca76a77961d 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -207,7 +207,7 @@ define i1 @cvt_icmp_neg_1_sext_plus_zext_eq(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_neg_1_sext_plus_zext_eq(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -237,7 +237,7 @@ define i1 @cvt_icmp_1_sext_plus_zext_eq(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_1_sext_plus_zext_eq(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -458,7 +458,7 @@ define i1 @cvt_icmp_neg_1_sext_plus_zext_ne(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_neg_1_sext_plus_zext_ne(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -487,7 +487,7 @@ define i1 @cvt_icmp_1_sext_plus_zext_ne(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_1_sext_plus_zext_ne(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -557,7 +557,7 @@ define i1 @cvt_icmp_neg_1_zext_plus_sext_eq(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_neg_1_zext_plus_sext_eq(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -587,7 +587,7 @@ define i1 @cvt_icmp_1_zext_plus_sext_eq(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_1_zext_plus_sext_eq(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -628,7 +628,7 @@ define i1 @cvt_icmp_neg_1_zext_plus_sext_ne(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_neg_1_zext_plus_sext_ne(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -657,7 +657,7 @@ define i1 @cvt_icmp_1_zext_plus_sext_ne(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_1_zext_plus_sext_ne(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[T]]
;
bb:
@@ -849,7 +849,7 @@ define i1 @test_sext_zext_cvt_neg_2_ult_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_sext_zext_cvt_neg_2_ult_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -864,7 +864,7 @@ define i1 @test_sext_zext_cvt_neg_1_ult_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_sext_zext_cvt_neg_1_ult_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -892,7 +892,7 @@ define i1 @test_sext_zext_cvt_2_ult_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_sext_zext_cvt_2_ult_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -907,7 +907,7 @@ define i1 @test_zext_sext_cvt_neg_1_ult_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_neg_1_ult_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1047,7 +1047,7 @@ define i1 @test_zext_sext_cvt_neg_2_ugt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_neg_2_ugt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1089,7 +1089,7 @@ define i1 @test_zext_sext_cvt_1_ugt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_1_ugt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
@@ -1104,7 +1104,7 @@ define i1 @test_zext_sext_cvt_2_ugt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_2_ugt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1256,7 +1256,7 @@ define i1 @test_zext_sext_cvt_neg_1_sgt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_neg_1_sgt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG1_NOT:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1_NOT]], [[ARG:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG:%.*]], [[ARG1_NOT]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1271,7 +1271,7 @@ define i1 @test_zext_sext_cvt_0_sgt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_0_sgt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1420,7 +1420,7 @@ define i1 @test_zext_sext_cvt_0_slt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_0_slt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
@@ -1435,7 +1435,7 @@ define i1 @test_zext_sext_cvt_1_slt_icmp(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_zext_sext_cvt_1_slt_icmp(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1617,7 +1617,7 @@ define i1 @test_cvt_icmp19(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_cvt_icmp19(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1646,7 +1646,7 @@ define i1 @test_cvt_icmp21(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_cvt_icmp21(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1661,7 +1661,7 @@ define i1 @test_cvt_icmp22(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_cvt_icmp22(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1689,7 +1689,7 @@ define i1 @test_cvt_icmp24(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_cvt_icmp24(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true
-; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]]
; CHECK-NEXT: ret i1 [[I4]]
;
bb:
@@ -1704,7 +1704,7 @@ define i1 @test_cvt_icmp25(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @test_cvt_icmp25(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[TMP1]]
;
bb:
@@ -2390,7 +2390,7 @@ define <2 x i1> @icmp_eq_add_non_splat2(<2 x i32> %a) {
define i1 @without_nsw_nuw(i8 %x, i8 %y) {
; CHECK-LABEL: @without_nsw_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 2
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
%t1 = add i8 %x, 37
@@ -2402,7 +2402,7 @@ define i1 @without_nsw_nuw(i8 %x, i8 %y) {
define i1 @with_nsw_nuw(i8 %x, i8 %y) {
; CHECK-LABEL: @with_nsw_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i8 [[X:%.*]], 2
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
%t1 = add nsw nuw i8 %x, 37
@@ -2414,7 +2414,7 @@ define i1 @with_nsw_nuw(i8 %x, i8 %y) {
define i1 @with_nsw_large(i8 %x, i8 %y) {
; CHECK-LABEL: @with_nsw_large(
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i8 [[X:%.*]], 2
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
%t1 = add nsw i8 %x, 37
@@ -2438,7 +2438,7 @@ define i1 @with_nsw_small(i8 %x, i8 %y) {
define i1 @with_nuw_large(i8 %x, i8 %y) {
; CHECK-LABEL: @with_nuw_large(
; CHECK-NEXT: [[TMP1:%.*]] = add nuw i8 [[X:%.*]], 2
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
%t1 = add nuw i8 %x, 37
@@ -2462,7 +2462,7 @@ define i1 @with_nuw_small(i8 %x, i8 %y) {
define i1 @with_nuw_large_negative(i8 %x, i8 %y) {
; CHECK-LABEL: @with_nuw_large_negative(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], -2
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[TOBOOL]]
;
%t1 = add nuw i8 %x, -37
@@ -2751,7 +2751,7 @@ define i32 @decrement_min(i32 %x) {
define i1 @icmp_add_add_C(i32 %a, i32 %b) {
; CHECK-LABEL: @icmp_add_add_C(
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add1 = add i32 %a, %b
@@ -2763,7 +2763,7 @@ define i1 @icmp_add_add_C(i32 %a, i32 %b) {
define i1 @icmp_add_add_C_pred(i32 %a, i32 %b) {
; CHECK-LABEL: @icmp_add_add_C_pred(
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add1 = add i32 %a, %b
@@ -2837,7 +2837,7 @@ define <2 x i1> @icmp_add_add_C_vector_undef(<2 x i8> %a, <2 x i8> %b) {
define i1 @icmp_add_add_C_comm1(i32 %a, i32 %b) {
; CHECK-LABEL: @icmp_add_add_C_comm1(
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add1 = add i32 %b, %a
@@ -2923,7 +2923,7 @@ define i1 @icmp_add_add_C_extra_use2(i32 %a, i32 %b) {
; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: call void @use(i32 [[ADD1]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%add1 = add i32 %a, %b
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll b/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll
index c8a3dfcd68cd4..711d59c1ebfd5 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll
@@ -6,10 +6,10 @@ declare void @use.v2i8(<2 x i8>)
define i1 @src_add_eq_p2(i8 %x, i8 %yy) {
; CHECK-LABEL: @src_add_eq_p2(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]]
-; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]]
+; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]]
; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y]], [[X:%.*]]
; CHECK-NEXT: call void @use.i8(i8 [[X1]])
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
@@ -25,8 +25,8 @@ define i1 @src_add_eq_p2(i8 %x, i8 %yy) {
define i1 @src_add_eq_p2_fail_multiuse(i8 %x, i8 %yy) {
; CHECK-LABEL: @src_add_eq_p2_fail_multiuse(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]]
-; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]]
-; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]]
+; CHECK-NEXT: [[X1:%.*]] = add i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: call void @use.i8(i8 [[X1]])
; CHECK-NEXT: [[V:%.*]] = and i8 [[X1]], [[Y]]
; CHECK-NEXT: call void @use.i8(i8 [[V]])
@@ -46,10 +46,10 @@ define i1 @src_add_eq_p2_fail_multiuse(i8 %x, i8 %yy) {
define i1 @src_xor_ne_zero(i8 %x, i8 %yy) {
; CHECK-LABEL: @src_xor_ne_zero(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]]
-; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]]
-; CHECK-NEXT: [[X1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]]
+; CHECK-NEXT: [[X1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: call void @use.i8(i8 [[X1]])
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -65,9 +65,9 @@ define i1 @src_xor_ne_zero(i8 %x, i8 %yy) {
define i1 @src_xor_ne_zero_fail_different_p2(i8 %x, i8 %yy) {
; CHECK-LABEL: @src_xor_ne_zero_fail_different_p2(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]]
-; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]]
+; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]]
; CHECK-NEXT: [[Y2:%.*]] = shl i8 [[Y]], 1
-; CHECK-NEXT: [[X1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[X1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: call void @use.i8(i8 [[X1]])
; CHECK-NEXT: [[V:%.*]] = and i8 [[X1]], [[Y2]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[V]], 0
@@ -86,10 +86,10 @@ define i1 @src_xor_ne_zero_fail_different_p2(i8 %x, i8 %yy) {
define <2 x i1> @src_sub_ne_p2(<2 x i8> %x, <2 x i8> %yy) {
; CHECK-LABEL: @src_sub_ne_p2(
; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[YY:%.*]]
-; CHECK-NEXT: [[Y:%.*]] = and <2 x i8> [[NY]], [[YY]]
+; CHECK-NEXT: [[Y:%.*]] = and <2 x i8> [[YY]], [[NY]]
; CHECK-NEXT: [[X1:%.*]] = sub <2 x i8> [[X:%.*]], [[Y]]
; CHECK-NEXT: call void @use.v2i8(<2 x i8> [[X1]])
-; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[R]]
;
@@ -107,7 +107,7 @@ define <2 x i1> @src_sub_eq_zero(<2 x i8> %x, <2 x i8> %yy) {
; CHECK-NEXT: [[Y:%.*]] = shl <2 x i8> <i8 1, i8 2>, [[YY:%.*]]
; CHECK-NEXT: [[X1:%.*]] = sub <2 x i8> [[X:%.*]], [[Y]]
; CHECK-NEXT: call void @use.v2i8(<2 x i8> [[X1]])
-; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP1]], [[Y]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
index 5de3e89d7027a..2facb91125ef7 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
@@ -138,7 +138,7 @@ define i1 @src_is_mask_xor(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_xor(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[MASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[MASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -154,7 +154,7 @@ define i1 @src_is_mask_xor_fail_notmask(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_xor_fail_notmask(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[NOTMASK:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[NOTMASK:%.*]] = xor i8 [[Y]], [[TMP1]]
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
@@ -172,7 +172,7 @@ define i1 @src_is_mask_select(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_mask_select(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
@@ -191,7 +191,7 @@ define i1 @src_is_mask_select_fail_wrong_pattern(i8 %x_in, i8 %y, i1 %cond, i8 %
; CHECK-LABEL: @src_is_mask_select_fail_wrong_pattern(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], [[Z:%.*]]
@@ -247,7 +247,7 @@ define i1 @src_is_mask_lshr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
; CHECK-LABEL: @src_is_mask_lshr(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[SMASK]], [[Z:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
@@ -267,7 +267,7 @@ define i1 @src_is_mask_ashr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
; CHECK-LABEL: @src_is_mask_ashr(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[MASK:%.*]] = ashr i8 [[SMASK]], [[Z:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
@@ -303,7 +303,7 @@ define i1 @src_is_mask_umax(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_umax(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umax.i8(i8 [[YMASK]], i8 3)
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
@@ -322,7 +322,7 @@ define i1 @src_is_mask_umin(i8 %x_in, i8 %y, i8 %z) {
; CHECK-LABEL: @src_is_mask_umin(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[ZMASK:%.*]] = lshr i8 15, [[Z:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 [[ZMASK]])
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
@@ -343,7 +343,7 @@ define i1 @src_is_mask_umin_fail_mismatch(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_umin_fail_mismatch(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 -32)
; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
@@ -363,7 +363,7 @@ define i1 @src_is_mask_smax(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_smax(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smax.i8(i8 [[YMASK]], i8 -1)
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
@@ -382,7 +382,7 @@ define i1 @src_is_mask_smin(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_smin(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smin.i8(i8 [[YMASK]], i8 0)
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
@@ -455,9 +455,9 @@ define i1 @src_is_notmask_shl(i8 %x_in, i8 %y, i1 %cond) {
define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[NEG_Y:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[NOTMASK0:%.*]] = xor i8 [[NEG_Y]], [[Y]]
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 7
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -473,9 +473,9 @@ define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
define i1 @src_is_notmask_x_xor_neg_x_inv(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x_inv(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[NEG_Y:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[NOTMASK0:%.*]] = xor i8 [[NEG_Y]], [[Y]]
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 7
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -591,7 +591,7 @@ define i1 @src_is_notmask_neg_p2_fail_not_invertable(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_notmask_neg_p2_fail_not_invertable(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], -124
; CHECK-NEXT: [[TMP2:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[Y]], [[TMP2]]
; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -608,7 +608,7 @@ define i1 @src_is_notmask_xor_fail(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_notmask_xor_fail(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[NOTMASK_REV:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[NOTMASK_REV:%.*]] = xor i8 [[Y]], [[TMP1]]
; CHECK-NEXT: [[NOTMASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NOTMASK_REV]])
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[X]]
@@ -680,7 +680,7 @@ define i1 @src_x_and_mask_slt(i8 %x, i8 %y, i1 %cond) {
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
@@ -698,7 +698,7 @@ define i1 @src_x_and_mask_sge(i8 %x, i8 %y, i1 %cond) {
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
-; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
@@ -714,7 +714,7 @@ define i1 @src_x_and_mask_slt_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_mask_slt_fail_maybe_neg(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -729,7 +729,7 @@ define i1 @src_x_and_mask_sge_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_mask_sge_fail_maybe_neg(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -826,7 +826,7 @@ define i1 @src_x_and_nmask_slt_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_nmask_slt_fail_maybe_z(
; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[NOT_MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[NOT_MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -841,7 +841,7 @@ define i1 @src_x_and_nmask_sge_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_nmask_sge_fail_maybe_z(
; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[NOT_MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[NOT_MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -879,7 +879,7 @@ define i1 @src_x_or_mask_ne(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_or_mask_ne(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
index 08d23e84c3960..7ca1c6a8da1a7 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
@@ -497,7 +497,7 @@ define i1 @eq_and_lshr_minval_commute(i8 %px, i8 %y) {
define i1 @eq_and_shl_two(i8 %x, i8 %y) {
; CHECK-LABEL: @eq_and_shl_two(
; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = shl i8 2, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[POW2_OR_ZERO]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[POW2_OR_ZERO]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], [[POW2_OR_ZERO]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -511,7 +511,7 @@ define i1 @eq_and_shl_two(i8 %x, i8 %y) {
define i1 @slt_and_shl_one(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_and_shl_one(
; CHECK-NEXT: [[POW2:%.*]] = shl nuw i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[POW2]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[POW2]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[AND]], [[POW2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -523,8 +523,8 @@ define i1 @slt_and_shl_one(i8 %x, i8 %y) {
define i1 @fold_eq_lhs(i8 %x, i8 %y) {
; CHECK-LABEL: @fold_eq_lhs(
-; CHECK-NEXT: [[AND:%.*]] = lshr i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%shl = shl i8 -1, %x
@@ -564,8 +564,8 @@ define i1 @fold_eq_lhs_fail_multiuse_shl(i8 %x, i8 %y) {
define i1 @fold_ne_rhs(i8 %x, i8 %yy) {
; CHECK-LABEL: @fold_ne_rhs(
; CHECK-NEXT: [[Y:%.*]] = xor i8 [[YY:%.*]], 123
-; CHECK-NEXT: [[AND:%.*]] = lshr i8 [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%y = xor i8 %yy, 123
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index a595ddb07db56..76f8c926e9bec 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -102,7 +102,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT: [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP1:%.*]] = shl i16 [[I_TR]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i16 [[J:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
diff --git a/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll b/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
index 30c97a7f25275..154958b0e3fad 100644
--- a/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
@@ -7,7 +7,7 @@ declare void @use.i8(i8)
define i1 @cmpeq_rorr_to_rorl(i8 %x, i8 %C) {
; CHECK-LABEL: @cmpeq_rorr_to_rorl(
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[C:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C)
@@ -65,7 +65,7 @@ define i1 @cmpne_rorr_rorr(i8 %x, i8 %C0, i8 %C1) {
; CHECK-LABEL: @cmpne_rorr_rorr(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[C0:%.*]], [[C1:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP2]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr0 = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C0)
@@ -78,7 +78,7 @@ define i1 @cmpne_rorrX_rorrY(i8 %x, i8 %y, i8 %C0, i8 %C1) {
; CHECK-LABEL: @cmpne_rorrX_rorrY(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[C0:%.*]], [[C1:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr0 = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C0)
@@ -135,7 +135,7 @@ define i1 @cmpeq_rorlXC_rorlYC_multiuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[Y_RORL1:%.*]] = call i8 @llvm.fshl.i8(i8 [[Y:%.*]], i8 [[Y]], i8 3)
; CHECK-NEXT: call void @use.i8(i8 [[Y_RORL1]])
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 3)
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorl0 = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 6)
diff --git a/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll b/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
index f5d5ef32c81e8..6c3190d6bb7e8 100644
--- a/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
@@ -84,7 +84,7 @@ define i1 @cmpeq_xor_cst1_multiuse(i32 %a, i32 %b) {
define i1 @cmpeq_xor_cst1_commuted(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst1_commuted(
; CHECK-NEXT: [[B2:%.*]] = mul i32 [[B:%.*]], [[B]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B2]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B2]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 10
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
index 7fc42c65d758b..1f012d82bc23f 100644
--- a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
@@ -39,7 +39,7 @@ define i1 @zext_zext_eq(i8 %x, i8 %y) {
define i1 @zext_zext_sle_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_zext_sle_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext i8 %x to i32
@@ -51,7 +51,7 @@ define i1 @zext_zext_sle_op0_narrow(i8 %x, i16 %y) {
define i1 @zext_zext_ule_op0_wide(i9 %x, i8 %y) {
; CHECK-LABEL: @zext_zext_ule_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i9
-; CHECK-NEXT: [[C:%.*]] = icmp uge i9 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i9 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext i9 %x to i32
@@ -96,7 +96,7 @@ define i1 @sext_sext_ne(i8 %x, i8 %y) {
define i1 @sext_sext_sge_op0_narrow(i5 %x, i8 %y) {
; CHECK-LABEL: @sext_sext_sge_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i5 [[X:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp sge i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sle i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i5 %x to i32
@@ -108,7 +108,7 @@ define i1 @sext_sext_sge_op0_narrow(i5 %x, i8 %y) {
define <2 x i1> @sext_sext_uge_op0_wide(<2 x i16> %x, <2 x i8> %y) {
; CHECK-LABEL: @sext_sext_uge_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i8> [[Y:%.*]] to <2 x i16>
-; CHECK-NEXT: [[C:%.*]] = icmp ule <2 x i16> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge <2 x i16> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%a = sext <2 x i16> %x to <2 x i32>
@@ -208,7 +208,7 @@ define i1 @zext_sext_sle_op0_narrow(i8 %x, i16 %y) {
define i1 @zext_nneg_sext_sle_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_nneg_sext_sle_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp sle i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext nneg i8 %x to i32
@@ -233,7 +233,7 @@ define i1 @zext_sext_ule_op0_wide(i9 %x, i8 %y) {
define i1 @zext_nneg_sext_ule_op0_wide(i9 %x, i8 %y) {
; CHECK-LABEL: @zext_nneg_sext_ule_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i9
-; CHECK-NEXT: [[C:%.*]] = icmp uge i9 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i9 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext nneg i9 %x to i32
@@ -333,7 +333,7 @@ define i1 @sext_zext_sge_op0_narrow(i5 %x, i8 %y) {
define i1 @sext_zext_nneg_sge_op0_narrow(i5 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_nneg_sge_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i5 [[X:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp sge i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sle i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i5 %x to i32
@@ -359,7 +359,7 @@ define i1 @sext_zext_uge_op0_wide(i16 %x, i8 %y) {
define i1 @sext_zext_nneg_uge_op0_wide(i16 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_nneg_uge_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i16 %x to i32
@@ -411,7 +411,7 @@ define i1 @zext_sext_sle_known_nonneg_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_sext_sle_known_nonneg_op0_narrow(
; CHECK-NEXT: [[N:%.*]] = and i8 [[X:%.*]], 12
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp sle i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%n = and i8 %x, 12
@@ -438,7 +438,7 @@ define i1 @zext_sext_ule_known_nonneg_op0_wide(i9 %x, i8 %y) {
define i1 @sext_zext_slt_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_slt_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = and i8 [[Y:%.*]], 126
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -451,7 +451,7 @@ define i1 @sext_zext_slt_known_nonneg(i8 %x, i8 %y) {
define i1 @sext_zext_ult_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_ult_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = lshr i8 [[Y:%.*]], 6
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -464,7 +464,7 @@ define i1 @sext_zext_ult_known_nonneg(i8 %x, i8 %y) {
define i1 @sext_zext_ne_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_ne_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = udiv i8 [[Y:%.*]], 6
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -492,7 +492,7 @@ define i1 @sext_zext_uge_known_nonneg_op0_wide(i16 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_uge_known_nonneg_op0_wide(
; CHECK-NEXT: [[N:%.*]] = and i8 [[Y:%.*]], 12
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i16 %x to i32
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index 29d0c941ac5c8..14548bde96f05 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -329,7 +329,7 @@ define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[J:%.*]], [[GEP1_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr %foo to ptr addrspace(3)
@@ -359,7 +359,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT: [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP1:%.*]] = shl i16 [[I_TR]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i16 [[J:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
@@ -515,10 +515,10 @@ define i1 @test_scalable_xy(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test_scalable_xy(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[TMP2]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[I:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[TMP3]], 2
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[TMP4]], [[J:%.*]]
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[J:%.*]], [[TMP4]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[GEP2_IDX]], [[GEP1_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -534,10 +534,10 @@ define i1 @test_scalable_ij(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test_scalable_ij(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[TMP2]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[I:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[TMP3]], 2
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[TMP4]], [[J:%.*]]
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[J:%.*]], [[TMP4]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[GEP1_IDX]], [[GEP2_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index aa23a6d27f69b..07536f271ceb1 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -16,7 +16,7 @@ define i32 @sterix(i32, i8, i64) {
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp ult i64 [[MUL3]], 4294967296
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
; CHECK: lor.rhs:
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[MUL3]], [[TMP2]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[TMP2]], [[MUL3]]
; CHECK-NEXT: [[TOBOOL7_NOT:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL7_NOT]] to i32
; CHECK-NEXT: br label [[LOR_END]]
@@ -128,12 +128,12 @@ define i1 @PR46561(i1 %a, i1 %x, i1 %y, i8 %z) {
; CHECK-NEXT: br i1 [[A:%.*]], label [[COND_TRUE:%.*]], label [[END:%.*]]
; CHECK: cond.true:
; CHECK-NEXT: [[MULBOOL:%.*]] = and i1 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[Z:%.*]] to i1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[MULBOOL]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i8 [[Z:%.*]] to i1
+; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[MULBOOL]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[TMP1]], true
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP3]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP2]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i1 [[P]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul.ll b/llvm/test/Transforms/InstCombine/icmp-mul.ll
index 7f76a94f395b6..999ab66bdaab1 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul.ll
@@ -1110,7 +1110,7 @@ define i1 @mul_xy_z_assumeodd_eq(i8 %x, i8 %y, i8 %z) {
define <2 x i1> @reused_mul_nsw_xy_z_setnonzero_vec_ne(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
; CHECK-LABEL: @reused_mul_nsw_xy_z_setnonzero_vec_ne(
; CHECK-NEXT: [[Z:%.*]] = or <2 x i8> [[ZI:%.*]], <i8 4, i8 4>
-; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[Y]], [[X:%.*]]
; CHECK-NEXT: call void @usev2xi8(<2 x i8> [[MULY]])
; CHECK-NEXT: ret <2 x i1> [[CMP]]
@@ -1126,8 +1126,8 @@ define <2 x i1> @reused_mul_nsw_xy_z_setnonzero_vec_ne(<2 x i8> %x, <2 x i8> %y,
define i1 @mul_mixed_nuw_nsw_xy_z_setodd_ult(i8 %x, i8 %y, i8 %zi) {
; CHECK-LABEL: @mul_mixed_nuw_nsw_xy_z_setodd_ult(
; CHECK-NEXT: [[Z:%.*]] = or i8 [[ZI:%.*]], 1
-; CHECK-NEXT: [[MULX:%.*]] = mul nsw i8 [[Z]], [[X:%.*]]
-; CHECK-NEXT: [[MULY:%.*]] = mul nuw nsw i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[MULX:%.*]] = mul nsw i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT: [[MULY:%.*]] = mul nuw nsw i8 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[MULX]], [[MULY]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -1211,7 +1211,7 @@ define i1 @reused_mul_nuw_xy_z_selectnonzero_ugt(i8 %x, i8 %y, i8 %z) {
define <2 x i1> @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
; CHECK-LABEL: @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(
; CHECK-NEXT: [[Z:%.*]] = or <2 x i8> [[ZI:%.*]], <i8 1, i8 3>
-; CHECK-NEXT: [[MULX:%.*]] = mul nuw <2 x i8> [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[MULX:%.*]] = mul nuw <2 x i8> [[X:%.*]], [[Z]]
; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ule <2 x i8> [[MULY]], [[MULX]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
index e95c72b75f97d..e7c0fcce8e778 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
@@ -239,8 +239,8 @@ define i1 @icmp_sle_negx_y_fail_maybe_zero(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y_todo(
; CHECK-NEXT: [[YY:%.*]] = select i1 [[Y:%.*]], i8 7, i8 24
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[YY]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
; CHECK-NEXT: ret i1 [[R]]
;
%yy = select i1 %y, i8 7, i8 24
@@ -252,8 +252,8 @@ define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
define i1 @icmp_eq_x_invertable_y(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y(
; CHECK-NEXT: [[YY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[YY]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
; CHECK-NEXT: ret i1 [[R]]
;
%yy = xor i8 %y, -1
@@ -265,7 +265,7 @@ define i1 @icmp_eq_x_invertable_y(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y2_todo(i8 %x, i1 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y2_todo(
; CHECK-NEXT: [[YY:%.*]] = select i1 [[Y:%.*]], i8 7, i8 24
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[YY]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[YY]], [[AND]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -278,7 +278,7 @@ define i1 @icmp_eq_x_invertable_y2_todo(i8 %x, i1 %y) {
define i1 @icmp_eq_x_invertable_y2(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y2(
; CHECK-NEXT: [[YY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[YY]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], [[YY]]
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
index 26f53cb4807ef..a3dccde1a9ebc 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
@@ -95,7 +95,7 @@ define i1 @or_eq_notY_eq_0(i8 %x, i8 %y) {
define i1 @or_eq_notY_eq_0_fail_multiuse(i8 %x, i8 %y) {
; CHECK-LABEL: @or_eq_notY_eq_0_fail_multiuse(
; CHECK-NEXT: [[NY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[NY]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X:%.*]], [[NY]]
; CHECK-NEXT: call void @use.i8(i8 [[OR]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], [[NY]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -122,7 +122,7 @@ define i1 @or_ne_notY_eq_1s(i8 %x, i8 %y) {
define i1 @or_ne_notY_eq_1s_fail_bad_not(i8 %x, i8 %y) {
; CHECK-LABEL: @or_ne_notY_eq_1s_fail_bad_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[TMP2]], -1
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -307,7 +307,7 @@ define i1 @or_simplify_uge(i8 %y_in, i8 %rhs_in, i1 %c) {
define i1 @or_simplify_ule_fail(i8 %y_in, i8 %rhs_in) {
; CHECK-LABEL: @or_simplify_ule_fail(
; CHECK-NEXT: [[RHS:%.*]] = and i8 [[RHS_IN:%.*]], 127
-; CHECK-NEXT: [[Y:%.*]] = or i8 [[RHS]], [[Y_IN:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = or i8 [[Y_IN:%.*]], [[RHS]]
; CHECK-NEXT: [[LBO:%.*]] = or i8 [[Y]], 64
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[LBO]], [[RHS]]
; CHECK-NEXT: ret i1 [[R]]
@@ -352,7 +352,7 @@ define i1 @or_simplify_ult(i8 %y_in, i8 %rhs_in) {
define i1 @or_simplify_ugt_fail(i8 %y_in, i8 %rhs_in) {
; CHECK-LABEL: @or_simplify_ugt_fail(
; CHECK-NEXT: [[RHS:%.*]] = or i8 [[RHS_IN:%.*]], 1
-; CHECK-NEXT: [[LBO:%.*]] = or i8 [[RHS]], [[Y_IN:%.*]]
+; CHECK-NEXT: [[LBO:%.*]] = or i8 [[Y_IN:%.*]], [[RHS]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[LBO]], [[RHS]]
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
index a1757fbb84b23..2b3eb185d844b 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
@@ -50,7 +50,7 @@ define i1 @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(i123 %x, i32 %y)
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i123 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i123 %x, 65536
@@ -70,7 +70,7 @@ define i1 @icmp_trunc_x_trunc_y_2_illegal_anyways(i33 %x, i63 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i33 [[X]] to i63
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i63 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i63 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 512
@@ -90,7 +90,7 @@ define i1 @icmp_trunc_x_trunc_y_3(i64 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i64 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i64 %x, 123
@@ -152,7 +152,7 @@ define i1 @icmp_trunc_x_trunc_y_swap0(i33 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 65536
@@ -172,7 +172,7 @@ define i1 @icmp_trunc_x_trunc_y_swap1(i33 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 65536
@@ -190,7 +190,7 @@ define i1 @icmp_trunc_x_zext_y(i32 %x, i8 %y) {
; CHECK-NEXT: [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i32 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i32 %x, 65536
@@ -206,7 +206,7 @@ define i1 @icmp_trunc_x_zext_y_2(i32 %x, i8 %y) {
; CHECK-NEXT: [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i32 %x, 65536
@@ -222,7 +222,7 @@ define i1 @icmp_trunc_x_zext_y_3(i6 %x, i32 %y) {
; CHECK-NEXT: [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i6 [[X:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y_lb_only = icmp ult i32 %y, 65536
@@ -412,7 +412,7 @@ define i1 @trunc_equality_either(i16 %x, i16 %y) {
define i1 @trunc_unsigned_nuw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nuw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw i32 %x to i16
@@ -437,7 +437,7 @@ define i1 @trunc_unsigned_nuw_sext(i32 %x, i8 %y) {
define i1 @trunc_unsigned_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -449,7 +449,7 @@ define i1 @trunc_unsigned_nsw_zext(i32 %x, i8 %y) {
define i1 @trunc_unsigned_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -461,7 +461,7 @@ define i1 @trunc_unsigned_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_signed_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_signed_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -473,7 +473,7 @@ define i1 @trunc_signed_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_signed_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_signed_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -511,7 +511,7 @@ define i1 @trunc_signed_nuw_zext(i32 %x, i8 %y) {
define i1 @trunc_equality_nuw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nuw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw i32 %x to i16
@@ -536,7 +536,7 @@ define i1 @trunc_equality_nuw_sext(i32 %x, i8 %y) {
define i1 @trunc_equality_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -548,7 +548,7 @@ define i1 @trunc_equality_nsw_zext(i32 %x, i8 %y) {
define i1 @trunc_equality_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -560,7 +560,7 @@ define i1 @trunc_equality_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_equality_both_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_both_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw nsw i32 %x to i16
@@ -572,7 +572,7 @@ define i1 @trunc_equality_both_sext(i32 %x, i8 %y) {
define i1 @test_eq1(i32 %x, i16 %y) {
; CHECK-LABEL: @test_eq1(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -586,7 +586,7 @@ define i1 @test_eq1(i32 %x, i16 %y) {
define i1 @test_eq2(i32 %x, i16 %y) {
; CHECK-LABEL: @test_eq2(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i16
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -598,7 +598,7 @@ define i1 @test_eq2(i32 %x, i16 %y) {
define i1 @test_ult(i32 %x, i16 %y) {
; CHECK-LABEL: @test_ult(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -610,7 +610,7 @@ define i1 @test_ult(i32 %x, i16 %y) {
define i1 @test_slt(i32 %x, i16 %y) {
; CHECK-LABEL: @test_slt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -622,7 +622,7 @@ define i1 @test_slt(i32 %x, i16 %y) {
define i1 @test_ult_nuw(i32 %x, i16 %y) {
; CHECK-LABEL: @test_ult_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nuw nsw i32 %x to i8
@@ -634,7 +634,7 @@ define i1 @test_ult_nuw(i32 %x, i16 %y) {
define i1 @test_slt_nuw(i32 %x, i16 %y) {
; CHECK-LABEL: @test_slt_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nuw nsw i32 %x to i8
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index fd61c8a301662..a4e7acbca930d 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -10,7 +10,7 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor1(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -27,7 +27,7 @@ define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor2(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -44,7 +44,7 @@ define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor3(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -60,7 +60,7 @@ define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_ne(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%nz = xor i8 %z, -1
@@ -73,7 +73,7 @@ define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_eq(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%nz = xor i8 %z, -1
@@ -88,7 +88,7 @@ define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor4(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -104,7 +104,7 @@ define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor5(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -120,7 +120,7 @@ define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor6(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -136,7 +136,7 @@ define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor7(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -152,7 +152,7 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor8(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -167,7 +167,7 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
; test (~a ^ b) < ~a
define i1 @test_slt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_slt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -180,7 +180,7 @@ define i1 @test_slt_xor(i32 %x, i32 %y) {
; test (a ^ ~b) <= ~b
define i1 @test_sle_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sle_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i32 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -193,7 +193,7 @@ define i1 @test_sle_xor(i32 %x, i32 %y) {
; test ~a > (~a ^ b)
define i1 @test_sgt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sgt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -205,7 +205,7 @@ define i1 @test_sgt_xor(i32 %x, i32 %y) {
define i1 @test_sge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sge_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -217,7 +217,7 @@ define i1 @test_sge_xor(i32 %x, i32 %y) {
define i1 @test_ult_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ult_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -229,7 +229,7 @@ define i1 @test_ult_xor(i32 %x, i32 %y) {
define i1 @test_ule_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ule_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -241,7 +241,7 @@ define i1 @test_ule_xor(i32 %x, i32 %y) {
define i1 @test_ugt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ugt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -253,7 +253,7 @@ define i1 @test_ugt_xor(i32 %x, i32 %y) {
define i1 @test_uge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_uge_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -386,7 +386,7 @@ define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt(
; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], <i8 31, i8 31>
; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], <i8 64, i8 64>
-; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
@@ -401,7 +401,7 @@ define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt_fail_no_known_msb(
; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], <i8 55, i8 55>
; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], <i8 8, i8 8>
-; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
@@ -415,7 +415,7 @@ define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
define i1 @xor_slt_2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @xor_slt_2(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], 88
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[XOR]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X]], [[XOR]]
; CHECK-NEXT: ret i1 [[R]]
;
%xor = xor i8 %x, 88
diff --git a/llvm/test/Transforms/InstCombine/icmp-or.ll b/llvm/test/Transforms/InstCombine/icmp-or.ll
index 1f9db5e5db9aa..0fdcaf8704674 100644
--- a/llvm/test/Transforms/InstCombine/icmp-or.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-or.ll
@@ -172,7 +172,7 @@ define i1 @eq_const_mask_not_same(i8 %x, i8 %y) {
define i1 @eq_const_mask_wrong_opcode(i8 %x, i8 %y) {
; CHECK-LABEL: @eq_const_mask_wrong_opcode(
; CHECK-NEXT: [[B0:%.*]] = or i8 [[X:%.*]], 5
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[B0]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[B0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP1]], 5
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -955,7 +955,7 @@ define i1 @icmp_or_xor_with_sub_3_6(i64 %x1, i64 %y1, i64 %x2, i64 %y2, i64 %x3,
define i1 @or_disjoint_with_constants(i8 %x) {
; CHECK-LABEL: @or_disjoint_with_constants(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP1:%.*]], 18
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[X:%.*]], 18
; CHECK-NEXT: ret i1 [[CMP]]
;
%or = or disjoint i8 %x, 1
@@ -966,8 +966,8 @@ define i1 @or_disjoint_with_constants(i8 %x) {
define i1 @or_disjoint_with_constants2(i8 %x) {
; CHECK-LABEL: @or_disjoint_with_constants2(
-; CHECK-NEXT: [[OR:%.*]] = or disjoint i8 [[TMP1:%.*]], 5
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[TMP1]], 66
+; CHECK-NEXT: [[OR:%.*]] = or disjoint i8 [[X:%.*]], 5
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[X]], 66
; CHECK-NEXT: call void @use(i8 [[OR]])
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-range.ll b/llvm/test/Transforms/InstCombine/icmp-range.ll
index 9ed2f2a4860c6..8b690826a7bf9 100644
--- a/llvm/test/Transforms/InstCombine/icmp-range.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-range.ll
@@ -152,7 +152,7 @@ define i1 @test_two_ranges(ptr nocapture readonly %arg1, ptr nocapture readonly
; Values' ranges overlap each other, so it can not be simplified.
define i1 @test_two_attribute_ranges(i32 range(i32 5, 10) %arg1, i32 range(i32 8, 16) %arg2) {
; CHECK-LABEL: @test_two_attribute_ranges(
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG1:%.*]], [[ARG2:%.*]]
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG2:%.*]], [[ARG1:%.*]]
; CHECK-NEXT: ret i1 [[RVAL]]
;
%rval = icmp ult i32 %arg2, %arg1
@@ -249,7 +249,7 @@ define <2 x i1> @test_two_ranges_vec_true(ptr nocapture readonly %arg1, ptr noca
; Values' ranges overlap each other, so it can not be simplified.
define <2 x i1> @test_two_argument_ranges_vec(<2 x i32> range(i32 5, 10) %arg1, <2 x i32> range(i32 8, 16) %arg2) {
; CHECK-LABEL: @test_two_argument_ranges_vec(
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult <2 x i32> [[VAL2:%.*]], [[VAL1:%.*]]
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult <2 x i32> [[ARG2:%.*]], [[ARG1:%.*]]
; CHECK-NEXT: ret <2 x i1> [[RVAL]]
;
%rval = icmp ult <2 x i32> %arg2, %arg1
@@ -281,9 +281,9 @@ declare range(i32 1, 6) i32 @create_range3()
; Values' ranges overlap each other, so it can not be simplified.
define i1 @test_two_return_attribute_ranges_not_simplified() {
; CHECK-LABEL: @test_two_return_attribute_ranges_not_simplified(
-; CHECK-NEXT: [[ARG2:%.*]] = call range(i32 5, 10) i32 @create_range1()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[VAL1:%.*]] = call range(i32 5, 10) i32 @create_range1()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[VAL2]], [[VAL1]]
; CHECK-NEXT: ret i1 [[RVAL]]
;
%val1 = call range(i32 5, 10) i32 @create_range1()
@@ -296,7 +296,7 @@ define i1 @test_two_return_attribute_ranges_not_simplified() {
define i1 @test_two_return_attribute_ranges_one_in_call() {
; CHECK-LABEL: @test_two_return_attribute_ranges_one_in_call(
; CHECK-NEXT: [[VAL1:%.*]] = call range(i32 1, 6) i32 @create_range1()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
; CHECK-NEXT: ret i1 false
;
%val1 = call range(i32 1, 6) i32 @create_range1()
@@ -309,7 +309,7 @@ define i1 @test_two_return_attribute_ranges_one_in_call() {
define i1 @test_two_return_attribute_ranges() {
; CHECK-LABEL: @test_two_return_attribute_ranges(
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @create_range3()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
; CHECK-NEXT: ret i1 false
;
%val1 = call i32 @create_range3()
@@ -370,7 +370,7 @@ define <2 x i1> @ult_zext(<2 x i1> %b, <2 x i8> %p) {
define i1 @uge_zext(i1 %b, i8 %x) {
; CHECK-LABEL: @uge_zext(
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i1 %b to i8
@@ -399,7 +399,7 @@ define i1 @ugt_zext_use(i1 %b, i8 %x) {
; CHECK-LABEL: @ugt_zext_use(
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[Z]])
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i1 %b to i8
@@ -413,7 +413,7 @@ define i1 @ugt_zext_use(i1 %b, i8 %x) {
define i1 @ult_zext_not_i1(i2 %b, i8 %x) {
; CHECK-LABEL: @ult_zext_not_i1(
; CHECK-NEXT: [[Z:%.*]] = zext i2 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i2 %b to i8
@@ -600,7 +600,7 @@ define <2 x i1> @ule_sext(<2 x i1> %b, <2 x i8> %p) {
define i1 @ugt_sext(i1 %b, i8 %x) {
; CHECK-LABEL: @ugt_sext(
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i1 %b to i8
@@ -629,7 +629,7 @@ define i1 @uge_sext_use(i1 %b, i8 %x) {
; CHECK-LABEL: @uge_sext_use(
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[S]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i1 %b to i8
@@ -643,7 +643,7 @@ define i1 @uge_sext_use(i1 %b, i8 %x) {
define i1 @ule_sext_not_i1(i2 %b, i8 %x) {
; CHECK-LABEL: @ule_sext_not_i1(
; CHECK-NEXT: [[S:%.*]] = sext i2 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i2 %b to i8
@@ -869,7 +869,7 @@ define i1 @zext_sext_add_icmp_i128(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_eq_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_eq_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -885,7 +885,7 @@ define i1 @zext_sext_add_icmp_eq_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ne_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ne_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -899,8 +899,8 @@ define i1 @zext_sext_add_icmp_ne_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_sgt_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_sgt_minus1(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i1 [[B:%.*]], true
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -915,7 +915,7 @@ define i1 @zext_sext_add_icmp_sgt_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ult_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ult_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -930,7 +930,7 @@ define i1 @zext_sext_add_icmp_ult_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_sgt_0(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_sgt_0(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -945,8 +945,8 @@ define i1 @zext_sext_add_icmp_sgt_0(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_slt_0(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_slt_0(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[B:%.*]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%zext.a = zext i1 %a to i8
%sext.b = sext i1 %b to i8
@@ -960,7 +960,7 @@ define i1 @zext_sext_add_icmp_slt_0(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_eq_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_eq_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -975,7 +975,7 @@ define i1 @zext_sext_add_icmp_eq_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ne_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ne_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -990,7 +990,7 @@ define i1 @zext_sext_add_icmp_ne_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_slt_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_slt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -1005,8 +1005,8 @@ define i1 @zext_sext_add_icmp_slt_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ugt_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ugt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[B:%.*]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%zext.a = zext i1 %a to i8
%sext.b = sext i1 %b to i8
@@ -1018,7 +1018,7 @@ define i1 @zext_sext_add_icmp_ugt_1(i1 %a, i1 %b) {
define <2 x i1> @vector_zext_sext_add_icmp_slt_1(<2 x i1> %a, <2 x i1> %b) {
; CHECK-LABEL: @vector_zext_sext_add_icmp_slt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
-; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%zext.a = zext <2 x i1> %a to <2 x i8>
@@ -1601,7 +1601,7 @@ define i1 @icmp_ne_sext_sgt_zero_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_sgt_zero_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 0
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp sgt i32 %a, 0
@@ -1614,7 +1614,7 @@ define i1 @icmp_slt_sext_ne_zero_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_zero_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, 0
@@ -1627,7 +1627,7 @@ define i1 @icmp_ne_sext_slt_allones_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_slt_allones_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], -1
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp slt i32 %a, -1
@@ -1640,7 +1640,7 @@ define i1 @icmp_slt_sext_ne_allones_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_allones_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], -1
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, -1
@@ -1653,7 +1653,7 @@ define i1 @icmp_ne_sext_slt_otherwise_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_slt_otherwise_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], 2
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp slt i32 %a, 2
@@ -1666,7 +1666,7 @@ define i1 @icmp_slt_sext_ne_otherwise_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_otherwise_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 2
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, 2
diff --git a/llvm/test/Transforms/InstCombine/icmp-rotate.ll b/llvm/test/Transforms/InstCombine/icmp-rotate.ll
index 2580bb6a865c7..eeaa1c7861097 100644
--- a/llvm/test/Transforms/InstCombine/icmp-rotate.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-rotate.ll
@@ -213,7 +213,7 @@ define i1 @amounts_mismatch(i8 %x, i8 %y, i8 %z, i8 %w) {
; CHECK-LABEL: @amounts_mismatch(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[Z:%.*]], [[W:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%f = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %z)
diff --git a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
index bacdb54f787d6..3a0c51aed602d 100644
--- a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
@@ -4,9 +4,9 @@
define i1 @sgt_3_impliesF_eq_2(i8 %x, i8 %y) {
; CHECK-LABEL: @sgt_3_impliesF_eq_2(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp sgt i8 %x, 3
%sel = select i1 %cmp, i8 2, i8 %y
@@ -17,9 +17,9 @@ define i1 @sgt_3_impliesF_eq_2(i8 %x, i8 %y) {
define i1 @sgt_3_impliesT_sgt_2(i8 %x, i8 %y) {
; CHECK-LABEL: @sgt_3_impliesT_sgt_2(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp sgt i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp sgt i8 %x, 3
%sel = select i1 %cmp, i8 2, i8 %y
@@ -44,7 +44,7 @@ define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @slt_x_impliesT_ne_smin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 127, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[X]], [[SEL]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp slt i8 %x, %z
@@ -68,10 +68,10 @@ define i1 @ult_x_impliesT_eq_umax_todo(i8 %x, i8 %y, i8 %z) {
define i1 @ult_1_impliesF_eq_1(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_1_impliesF_eq_1(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[SEL:%.*]], 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[X:%.*]], [[SEL]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp ult i8 %x, 1
%sel = select i1 %cmp, i8 1, i8 %y
@@ -83,7 +83,7 @@ define i1 @ugt_x_impliesF_eq_umin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @ugt_x_impliesF_eq_umin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 0, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[X]], [[SEL]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp ugt i8 %z, %x
diff --git a/llvm/test/Transforms/InstCombine/icmp-select.ll b/llvm/test/Transforms/InstCombine/icmp-select.ll
index 59d2a1b165c0f..fb68c6ee94207 100644
--- a/llvm/test/Transforms/InstCombine/icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-select.ll
@@ -35,7 +35,7 @@ define i1 @icmp_select_var_commuted(i8 %x, i8 %y, i8 %_z) {
; CHECK-LABEL: @icmp_select_var_commuted(
; CHECK-NEXT: [[Z:%.*]] = udiv i8 42, [[_Z:%.*]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -122,7 +122,7 @@ define i1 @icmp_select_var_pred_ult(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_ult(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp ugt i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ult i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -137,7 +137,7 @@ define i1 @icmp_select_var_pred_uge(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_uge(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp ule i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp uge i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 [[CMP21]], i1 false
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -152,7 +152,7 @@ define i1 @icmp_select_var_pred_uge_commuted(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_uge_commuted(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp uge i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ule i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-sub.ll b/llvm/test/Transforms/InstCombine/icmp-sub.ll
index 5645dededf2e4..8cb3c1c181cec 100644
--- a/llvm/test/Transforms/InstCombine/icmp-sub.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-sub.ll
@@ -622,7 +622,7 @@ define i1 @PR60818_eq_multi_use(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]]
; CHECK-NEXT: call void @use(i32 [[SUB]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SUB]], [[A]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], [[SUB]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
@@ -637,7 +637,7 @@ define i1 @PR60818_sgt(i32 %a) {
; CHECK-LABEL: @PR60818_sgt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[SUB]], [[A]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A]], [[SUB]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
index 27b02c8c6e936..ba47ed02edbdf 100644
--- a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
@@ -138,7 +138,7 @@ define i1 @oneuse1(i8 %val, i8 %bits) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -154,7 +154,7 @@ define i1 @oneuse2(i8 %val, i8 %bits) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -173,7 +173,7 @@ define i1 @n0(i8 %val, i8 %bits) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 1, %bits ; constant is not -1
@@ -199,7 +199,7 @@ define <2 x i1> @n2_vec_nonsplat(<2 x i8> %val, <2 x i8> %bits) {
; CHECK-LABEL: @n2_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 1>, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor <2 x i8> [[T0]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = shl <2 x i8> <i8 -1, i8 1>, %bits ; again, wrong constant
@@ -225,7 +225,7 @@ define i1 @n3(i8 %val, i8 %bits) {
; CHECK-LABEL: @n3(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
diff --git a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
index 8441033d4857e..37aa85202e562 100644
--- a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
@@ -138,7 +138,7 @@ define i1 @oneuse1(i8 %val, i8 %bits) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -154,7 +154,7 @@ define i1 @oneuse2(i8 %val, i8 %bits) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -173,7 +173,7 @@ define i1 @n0(i8 %val, i8 %bits) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 1, %bits ; constant is not -1
@@ -199,7 +199,7 @@ define <2 x i1> @n2_vec_nonsplat(<2 x i8> %val, <2 x i8> %bits) {
; CHECK-LABEL: @n2_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 1>, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor <2 x i8> [[T0]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = shl <2 x i8> <i8 -1, i8 1>, %bits ; again, wrong constant
@@ -225,7 +225,7 @@ define i1 @n3(i8 %val, i8 %bits) {
; CHECK-LABEL: @n3(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
index 2d786c8f48833..ad9c60638f754 100644
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -581,7 +581,7 @@ define i1 @test28_extra_uses(i32 %x, i32 %y, i32 %z) {
define i1 @ugt_sub(i32 %xsrc, i32 %y) {
; CHECK-LABEL: @ugt_sub(
; CHECK-NEXT: [[X:%.*]] = udiv i32 [[XSRC:%.*]], 42
-; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%x = udiv i32 %xsrc, 42 ; thwart complexity-based canonicalization
@@ -1266,7 +1266,7 @@ define i1 @test62_as1(ptr addrspace(1) %a) {
define i1 @low_mask_eq_zext(i8 %a, i32 %b) {
; CHECK-LABEL: @low_mask_eq_zext(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%z = zext i8 %a to i32
@@ -1278,7 +1278,7 @@ define i1 @low_mask_eq_zext(i8 %a, i32 %b) {
define i1 @low_mask_eq_zext_commute(i8 %a, i32 %b) {
; CHECK-LABEL: @low_mask_eq_zext_commute(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%t = and i32 %b, 255
@@ -1322,7 +1322,7 @@ define i1 @low_mask_eq_zext_use1(i8 %a, i32 %b) {
; CHECK-NEXT: [[T:%.*]] = and i32 [[B:%.*]], 255
; CHECK-NEXT: call void @use_i32(i32 [[T]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%t = and i32 %b, 255
@@ -1337,7 +1337,7 @@ define i1 @low_mask_eq_zext_use2(i8 %a, i32 %b) {
; CHECK-NEXT: [[Z:%.*]] = zext i8 [[A:%.*]] to i32
; CHECK-NEXT: call void @use_i32(i32 [[Z]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%t = and i32 %b, 255
@@ -1367,7 +1367,7 @@ define i1 @low_mask_eq_zext_use3(i8 %a, i32 %b) {
define <2 x i1> @low_mask_eq_zext_vec_splat(<2 x i8> %a, <2 x i32> %b) {
; CHECK-LABEL: @low_mask_eq_zext_vec_splat(
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[B:%.*]] to <2 x i8>
-; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%t = and <2 x i32> %b, <i32 255, i32 255>
@@ -1769,7 +1769,7 @@ define i1 @icmp_mul0_ne0(i32 %x) {
define i1 @icmp_add20_eq_add57(i32 %x, i32 %y) {
; CHECK-LABEL: @icmp_add20_eq_add57(
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[Y:%.*]], 37
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%1 = add i32 %x, 20
@@ -1781,7 +1781,7 @@ define i1 @icmp_add20_eq_add57(i32 %x, i32 %y) {
define <2 x i1> @icmp_add20_eq_add57_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_add20_eq_add57_splat(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], <i32 37, i32 37>
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add <2 x i32> %x, <i32 20, i32 20>
@@ -1793,7 +1793,7 @@ define <2 x i1> @icmp_add20_eq_add57_splat(<2 x i32> %x, <2 x i32> %y) {
define <2 x i1> @icmp_add20_eq_add57_poison(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_add20_eq_add57_poison(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], <i32 37, i32 37>
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add <2 x i32> %x, <i32 20, i32 20>
@@ -1805,7 +1805,7 @@ define <2 x i1> @icmp_add20_eq_add57_poison(<2 x i32> %x, <2 x i32> %y) {
define <2 x i1> @icmp_add20_eq_add57_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_add20_eq_add57_vec_nonsplat(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], <i32 37, i32 39>
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add <2 x i32> %x, <i32 20, i32 19>
@@ -1853,7 +1853,7 @@ define <2 x i1> @icmp_sub57_ne_sub20_vec_poison(<2 x i32> %x, <2 x i32> %y) {
define <2 x i1> @icmp_sub57_ne_sub20_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_sub57_ne_sub20_vec_nonsplat(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], <i32 37, i32 37>
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add <2 x i32> %x, <i32 -57, i32 -58>
@@ -1905,7 +1905,7 @@ define i1 @icmp_add1_sle(i32 %x, i32 %y) {
define i1 @icmp_add20_sge_add57(i32 %x, i32 %y) {
; CHECK-LABEL: @icmp_add20_sge_add57(
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[Y:%.*]], 37
-; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%1 = add nsw i32 %x, 20
@@ -1917,7 +1917,7 @@ define i1 @icmp_add20_sge_add57(i32 %x, i32 %y) {
define <2 x i1> @icmp_add20_sge_add57_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_add20_sge_add57_splat(
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[Y:%.*]], <i32 37, i32 37>
-; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add nsw <2 x i32> %x, <i32 20, i32 20>
@@ -1929,7 +1929,7 @@ define <2 x i1> @icmp_add20_sge_add57_splat(<2 x i32> %x, <2 x i32> %y) {
define <2 x i1> @icmp_add20_sge_add57_poison(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @icmp_add20_sge_add57_poison(
; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[Y:%.*]], <i32 37, i32 37>
-; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge <2 x i32> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%1 = add nsw <2 x i32> %x, <i32 20, i32 20>
@@ -3192,7 +3192,7 @@ define i1 @icmp_and_or_lshr(i32 %x, i32 %y) {
; CHECK-LABEL: @icmp_and_or_lshr(
; CHECK-NEXT: [[SHF1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[OR2:%.*]] = or i32 [[SHF1]], 1
-; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR2]], [[X:%.*]]
+; CHECK-NEXT: [[AND3:%.*]] = and i32 [[X:%.*]], [[OR2]]
; CHECK-NEXT: [[RET:%.*]] = icmp ne i32 [[AND3]], 0
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -3634,7 +3634,7 @@ define i1 @f10(i16 %p) {
define i1 @cmp_sgt_rhs_dec(float %x, i32 %i) {
; CHECK-LABEL: @cmp_sgt_rhs_dec(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -3646,7 +3646,7 @@ define i1 @cmp_sgt_rhs_dec(float %x, i32 %i) {
define i1 @cmp_sle_rhs_dec(float %x, i32 %i) {
; CHECK-LABEL: @cmp_sle_rhs_dec(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -3658,7 +3658,7 @@ define i1 @cmp_sle_rhs_dec(float %x, i32 %i) {
define i1 @cmp_sge_rhs_inc(float %x, i32 %i) {
; CHECK-LABEL: @cmp_sge_rhs_inc(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -3670,7 +3670,7 @@ define i1 @cmp_sge_rhs_inc(float %x, i32 %i) {
define i1 @cmp_slt_rhs_inc(float %x, i32 %i) {
; CHECK-LABEL: @cmp_slt_rhs_inc(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -3823,7 +3823,7 @@ define i1 @icmp_add1_ule(i32 %x, i32 %y) {
define i1 @cmp_uge_rhs_inc(float %x, i32 %i) {
; CHECK-LABEL: @cmp_uge_rhs_inc(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -3835,7 +3835,7 @@ define i1 @cmp_uge_rhs_inc(float %x, i32 %i) {
define i1 @cmp_ult_rhs_inc(float %x, i32 %i) {
; CHECK-LABEL: @cmp_ult_rhs_inc(
; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[CONV]], [[I:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[I:%.*]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%conv = fptosi float %x to i32
@@ -4655,7 +4655,7 @@ define <2 x i1> @zext_bool_and_eq1(<2 x i1> %x, <2 x i8> %y) {
define i1 @zext_bool_or_eq0(i1 %x, i8 %y) {
; CHECK-LABEL: @zext_bool_or_eq0(
; CHECK-NEXT: [[ZX:%.*]] = zext i1 [[X:%.*]] to i8
-; CHECK-NEXT: [[A:%.*]] = or i8 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = or i8 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[A]], 0
; CHECK-NEXT: ret i1 [[R]]
;
@@ -4671,7 +4671,7 @@ define i1 @zext_bool_and_eq0_use(i1 %x, i64 %y) {
; CHECK-LABEL: @zext_bool_and_eq0_use(
; CHECK-NEXT: [[ZX:%.*]] = zext i1 [[X:%.*]] to i64
; CHECK-NEXT: call void @use_i64(i64 [[ZX]])
-; CHECK-NEXT: [[A:%.*]] = and i64 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i64 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[A]], 0
; CHECK-NEXT: ret i1 [[R]]
;
@@ -4704,7 +4704,7 @@ define i1 @zext_bool_and_ne0_use(i1 %x, i64 %y) {
define i1 @zext_notbool_and_ne0(i2 %x, i8 %y) {
; CHECK-LABEL: @zext_notbool_and_ne0(
; CHECK-NEXT: [[ZX:%.*]] = zext i2 [[X:%.*]] to i8
-; CHECK-NEXT: [[A:%.*]] = and i8 [[ZX]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i8 [[Y:%.*]], [[ZX]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[A]], 0
; CHECK-NEXT: ret i1 [[R]]
;
@@ -5055,7 +5055,7 @@ define i1 @or_positive_sgt_zero_multi_use(i8 %a) {
define i1 @disjoint_or_sgt_1(i8 %a, i8 %b) {
; CHECK-LABEL: @disjoint_or_sgt_1(
; CHECK-NEXT: [[B1:%.*]] = add nsw i8 [[B:%.*]], 2
-; CHECK-NEXT: [[ICMP_:%.*]] = icmp sle i8 [[B1]], [[A:%.*]]
+; CHECK-NEXT: [[ICMP_:%.*]] = icmp sge i8 [[A:%.*]], [[B1]]
; CHECK-NEXT: ret i1 [[ICMP_]]
;
%a1 = or disjoint i8 %a, 1
@@ -5093,7 +5093,7 @@ define i1 @disjoint_or_sgt_3(i8 %a, i8 %b) {
define i1 @disjoint_or_ugt_1(i8 %a, i8 %b) {
; CHECK-LABEL: @disjoint_or_ugt_1(
; CHECK-NEXT: [[B1:%.*]] = add nsw i8 [[B:%.*]], 2
-; CHECK-NEXT: [[ICMP_:%.*]] = icmp ule i8 [[B1]], [[A:%.*]]
+; CHECK-NEXT: [[ICMP_:%.*]] = icmp uge i8 [[A:%.*]], [[B1]]
; CHECK-NEXT: ret i1 [[ICMP_]]
;
%a1 = or disjoint i8 %a, 1
@@ -5146,7 +5146,7 @@ define i1 @deduce_nuw_flag_2(i8 %a, i8 %b) {
; CHECK-LABEL: @deduce_nuw_flag_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = add nuw i8 [[B:%.*]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP0]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
@@ -5174,7 +5174,7 @@ define i1 @dont_deduce_nuw_flag_2(i8 %a, i8 %b) {
; CHECK-LABEL: @dont_deduce_nuw_flag_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[B:%.*]], -1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP0]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP0]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/implies.ll b/llvm/test/Transforms/InstCombine/implies.ll
index c02d84d3f8371..047b2aa816e0b 100644
--- a/llvm/test/Transforms/InstCombine/implies.ll
+++ b/llvm/test/Transforms/InstCombine/implies.ll
@@ -137,7 +137,7 @@ F:
define i1 @src_or_distjoint_implies_sle_fail(i8 %x, i8 %y, i1 %other) {
; CHECK-LABEL: @src_or_distjoint_implies_sle_fail(
; CHECK-NEXT: [[X2:%.*]] = or disjoint i8 [[X:%.*]], 24
-; CHECK-NEXT: [[COND_NOT:%.*]] = icmp slt i8 [[X2]], [[Y:%.*]]
+; CHECK-NEXT: [[COND_NOT:%.*]] = icmp sgt i8 [[Y:%.*]], [[X2]]
; CHECK-NEXT: br i1 [[COND_NOT]], label [[F:%.*]], label [[T:%.*]]
; CHECK: T:
; CHECK-NEXT: [[X1:%.*]] = or disjoint i8 [[X]], 23
@@ -268,7 +268,7 @@ F:
define i1 @src_or_implies_ule(i8 %x, i8 %y, i8 %z, i1 %other) {
; CHECK-LABEL: @src_or_implies_ule(
; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[COND_NOT:%.*]] = icmp ugt i8 [[OR]], [[Z:%.*]]
+; CHECK-NEXT: [[COND_NOT:%.*]] = icmp ult i8 [[Z:%.*]], [[OR]]
; CHECK-NEXT: br i1 [[COND_NOT]], label [[F:%.*]], label [[T:%.*]]
; CHECK: T:
; CHECK-NEXT: ret i1 true
diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
index fff05a416dece..abb36b6a785e5 100644
--- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
+++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
@@ -53,7 +53,7 @@ define i4 @in_constant_varx_6_invmask(i4 %x, i4 %mask) {
define i4 @in_constant_mone_vary_invmask(i4 %y, i4 %mask) {
; CHECK-LABEL: @in_constant_mone_vary_invmask(
; CHECK-NEXT: [[MASK_NOT:%.*]] = xor i4 [[MASK:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = or i4 [[MASK_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i4 [[Y:%.*]], [[MASK_NOT]]
; CHECK-NEXT: ret i4 [[R]]
;
%notmask = xor i4 %mask, -1
diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
index a76662c4bc439..0440199dadb87 100644
--- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
+++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
@@ -95,7 +95,7 @@ define <3 x i4> @in_constant_varx_6_invmask_poison(<3 x i4> %x, <3 x i4> %mask)
define <2 x i4> @in_constant_mone_vary_invmask(<2 x i4> %y, <2 x i4> %mask) {
; CHECK-LABEL: @in_constant_mone_vary_invmask(
; CHECK-NEXT: [[MASK_NOT:%.*]] = xor <2 x i4> [[MASK:%.*]], <i4 -1, i4 -1>
-; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[MASK_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[Y:%.*]], [[MASK_NOT]]
; CHECK-NEXT: ret <2 x i4> [[R]]
;
%notmask = xor <2 x i4> %mask, <i4 -1, i4 -1>
diff --git a/llvm/test/Transforms/InstCombine/ispow2.ll b/llvm/test/Transforms/InstCombine/ispow2.ll
index a143b1347ccee..145c6089b3041 100644
--- a/llvm/test/Transforms/InstCombine/ispow2.ll
+++ b/llvm/test/Transforms/InstCombine/ispow2.ll
@@ -161,7 +161,7 @@ define i1 @is_pow2or0_negate_op_extra_use1(i32 %x) {
define i1 @is_pow2or0_negate_op_extra_use2(i32 %x) {
; CHECK-LABEL: @is_pow2or0_negate_op_extra_use2(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], [[NEG]]
; CHECK-NEXT: call void @use(i32 [[AND]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -1190,7 +1190,7 @@ define <2 x i1> @isnot_pow2nor0_wrong_pred3_ctpop_commute_vec(<2 x i8> %x) {
define i1 @is_pow2_fail_pr63327(i32 %x) {
; CHECK-LABEL: @is_pow2_fail_pr63327(
; CHECK-NEXT: [[NX:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[X_AND_NX:%.*]] = and i32 [[NX]], [[X]]
+; CHECK-NEXT: [[X_AND_NX:%.*]] = and i32 [[X]], [[NX]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i32 [[X_AND_NX]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -1244,7 +1244,7 @@ define i1 @blsmsk_is_p2_or_z_fail(i32 %xx, i32 %yy) {
define i1 @blsmsk_isnt_p2_or_z_fail(i32 %x) {
; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail(
; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]]
+; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]]
; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -1259,7 +1259,7 @@ declare void @use.i32(i32)
define i1 @blsmsk_isnt_p2_or_z_fail_multiuse(i32 %x) {
; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_multiuse(
; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]]
+; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]]
; CHECK-NEXT: call void @use.i32(i32 [[Y]])
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
@@ -1274,7 +1274,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_multiuse(i32 %x) {
define i1 @blsmsk_isnt_p2_or_z_fail_wrong_add(i32 %x, i32 %z) {
; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_wrong_add(
; CHECK-NEXT: [[XM1:%.*]] = add i32 [[Z:%.*]], -1
-; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X:%.*]], [[XM1]]
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -1288,7 +1288,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_wrong_add(i32 %x, i32 %z) {
define i1 @blsmsk_isnt_p2_or_z_fail_bad_xor(i32 %x, i32 %z) {
; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_bad_xor(
; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[Z:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = xor i32 [[Z:%.*]], [[XM1]]
; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -1302,7 +1302,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_bad_xor(i32 %x, i32 %z) {
define i1 @blsmsk_is_p2_or_z_fail_bad_cmp(i32 %x, i32 %z) {
; CHECK-LABEL: @blsmsk_is_p2_or_z_fail_bad_cmp(
; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]]
+; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]]
; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[Y]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index 8b4249b2c25a9..af05cf7544826 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -1018,7 +1018,7 @@ define i1 @extract_value_sadd_fail(i8 %xx, i8 %yy) {
define i1 @extract_value_usub(i8 %x, i8 %zz) {
; CHECK-LABEL: @extract_value_usub(
; CHECK-NEXT: [[Z:%.*]] = add nuw i8 [[ZZ:%.*]], 1
-; CHECK-NEXT: [[Y:%.*]] = add i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = add i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: [[SUB_UOV:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X]], i8 [[Y]])
; CHECK-NEXT: [[SUB:%.*]] = extractvalue { i8, i1 } [[SUB_UOV]], 0
; CHECK-NEXT: [[UOV:%.*]] = extractvalue { i8, i1 } [[SUB_UOV]], 1
@@ -1062,7 +1062,7 @@ define i1 @extract_value_usub_fail(i8 %x, i8 %z) {
define i1 @extract_value_ssub(i8 %x, i8 %zz) {
; CHECK-LABEL: @extract_value_ssub(
; CHECK-NEXT: [[Z:%.*]] = add nuw i8 [[ZZ:%.*]], 1
-; CHECK-NEXT: [[Y:%.*]] = add i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = add i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: [[SUB_SOV:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[Y]], i8 [[X]])
; CHECK-NEXT: [[SUB:%.*]] = extractvalue { i8, i1 } [[SUB_SOV]], 0
; CHECK-NEXT: [[SOV:%.*]] = extractvalue { i8, i1 } [[SUB_SOV]], 1
diff --git a/llvm/test/Transforms/InstCombine/known-never-nan.ll b/llvm/test/Transforms/InstCombine/known-never-nan.ll
index a1cabc29682b4..35ac6ce56910d 100644
--- a/llvm/test/Transforms/InstCombine/known-never-nan.ll
+++ b/llvm/test/Transforms/InstCombine/known-never-nan.ll
@@ -64,7 +64,7 @@ define i1 @nnan_fadd(double %arg0, double %arg1) {
define i1 @nnan_fadd_maybe_nan_lhs(double %arg0, double %arg1) {
; CHECK-LABEL: @nnan_fadd_maybe_nan_lhs(
; CHECK-NEXT: [[NNAN_ARG1:%.*]] = fadd nnan double [[ARG1:%.*]], 1.000000e+00
-; CHECK-NEXT: [[OP:%.*]] = fadd double [[NNAN_ARG1]], [[ARG0:%.*]]
+; CHECK-NEXT: [[OP:%.*]] = fadd double [[ARG0:%.*]], [[NNAN_ARG1]]
; CHECK-NEXT: [[TMP:%.*]] = fcmp ord double [[OP]], 0.000000e+00
; CHECK-NEXT: ret i1 [[TMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/log-pow.ll b/llvm/test/Transforms/InstCombine/log-pow.ll
index 1dfe5c944eee7..b628e7cc57f15 100644
--- a/llvm/test/Transforms/InstCombine/log-pow.ll
+++ b/llvm/test/Transforms/InstCombine/log-pow.ll
@@ -4,7 +4,7 @@
define double @log_pow(double %x, double %y) {
; CHECK-LABEL: @log_pow(
; CHECK-NEXT: [[LOG1:%.*]] = call fast double @llvm.log.f64(double [[X:%.*]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[LOG1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[Y:%.*]], [[LOG1]]
; CHECK-NEXT: ret double [[MUL]]
;
%pow = call fast double @pow(double %x, double %y)
@@ -84,7 +84,7 @@ define double @log_powi_not_fast(double %x, i32 %y) {
define float @log10f_powf(float %x, float %y) {
; CHECK-LABEL: @log10f_powf(
; CHECK-NEXT: [[LOG1:%.*]] = call fast float @llvm.log10.f32(float [[X:%.*]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[LOG1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[Y:%.*]], [[LOG1]]
; CHECK-NEXT: ret float [[MUL]]
;
%pow = call fast float @powf(float %x, float %y)
@@ -95,7 +95,7 @@ define float @log10f_powf(float %x, float %y) {
define <2 x double> @log2v_powv(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: @log2v_powv(
; CHECK-NEXT: [[LOG1:%.*]] = call fast <2 x double> @llvm.log2.v2f64(<2 x double> [[X:%.*]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast <2 x double> [[LOG1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast <2 x double> [[Y:%.*]], [[LOG1]]
; CHECK-NEXT: ret <2 x double> [[MUL]]
;
%pow = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> %y)
diff --git a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
index 20d60206ebcdf..cf0dc35032884 100644
--- a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
@@ -4,8 +4,8 @@
define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @foo(
-; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[J]]
;
%e = icmp slt i32 %a, %b
@@ -19,8 +19,8 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @bar(
-; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[J]]
;
%e = icmp slt i32 %a, %b
@@ -34,8 +34,8 @@ define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) {
define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @goo(
-; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[T3]]
;
%t0 = icmp slt i32 %a, %b
@@ -141,8 +141,8 @@ define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2
define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @par(
-; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[T3]]
;
%t0 = icmp slt i32 %a, %b
@@ -343,10 +343,10 @@ define <2 x i64> @bitcast_select_multi_uses(<4 x i1> %cmp, <2 x i64> %a, <2 x i6
; CHECK-LABEL: @bitcast_select_multi_uses(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP:%.*]] to <4 x i32>
; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
-; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[BC1]], [[A:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[A:%.*]], [[BC1]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[BC2:%.*]] = xor <2 x i64> [[TMP1]], <i64 -1, i64 -1>
-; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[BC2]], [[B:%.*]]
+; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[B:%.*]], [[BC2]]
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND1]]
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[AND2]], [[BC2]]
; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> [[OR]], [[ADD]]
@@ -393,7 +393,7 @@ define i1 @bools_logical(i1 %a, i1 %b, i1 %c) {
define i1 @bools_multi_uses1(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @bools_multi_uses1(
; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[AND1:%.*]] = and i1 [[NOT]], [[A:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i1 [[A:%.*]], [[NOT]]
; CHECK-NEXT: [[OR:%.*]] = select i1 [[C]], i1 [[B:%.*]], i1 [[A]]
; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[OR]], [[AND1]]
; CHECK-NEXT: ret i1 [[XOR]]
diff --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll
index 6e2ed6bf796d0..62a63839704a4 100644
--- a/llvm/test/Transforms/InstCombine/logical-select.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select.ll
@@ -9,8 +9,8 @@ declare void @use2(<2 x i1>)
define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @foo(
-; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[J]]
;
%e = icmp slt i32 %a, %b
@@ -24,8 +24,8 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @bar(
-; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[J]]
;
%e = icmp slt i32 %a, %b
@@ -39,8 +39,8 @@ define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) {
define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @goo(
-; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[T3]]
;
%t0 = icmp slt i32 %a, %b
@@ -146,8 +146,8 @@ define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2
define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @par(
-; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]]
+; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]]
; CHECK-NEXT: ret i32 [[T3]]
;
%t0 = icmp slt i32 %a, %b
@@ -348,10 +348,10 @@ define <2 x i64> @bitcast_select_multi_uses(<4 x i1> %cmp, <2 x i64> %a, <2 x i6
; CHECK-LABEL: @bitcast_select_multi_uses(
; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP:%.*]] to <4 x i32>
; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
-; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[BC1]], [[A:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[A:%.*]], [[BC1]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
; CHECK-NEXT: [[BC2:%.*]] = xor <2 x i64> [[TMP1]], <i64 -1, i64 -1>
-; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[BC2]], [[B:%.*]]
+; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[B:%.*]], [[BC2]]
; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND1]]
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[AND2]], [[BC2]]
; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> [[OR]], [[ADD]]
@@ -398,7 +398,7 @@ define i1 @bools_logical(i1 %a, i1 %b, i1 %c) {
define i1 @bools_multi_uses1(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @bools_multi_uses1(
; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[AND1:%.*]] = and i1 [[NOT]], [[A:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i1 [[A:%.*]], [[NOT]]
; CHECK-NEXT: [[OR:%.*]] = select i1 [[C]], i1 [[B:%.*]], i1 [[A]]
; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[OR]], [[AND1]]
; CHECK-NEXT: ret i1 [[XOR]]
@@ -766,7 +766,7 @@ define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> noundef %cond, <8 x i3> %pc,
; CHECK-NEXT: [[T9:%.*]] = bitcast <3 x i8> [[S]] to <8 x i3>
; CHECK-NEXT: [[NOTT9:%.*]] = xor <8 x i3> [[T9]], <i3 -1, i3 -1, i3 -1, i3 -1, i3 -1, i3 -1, i3 -1, i3 -1>
; CHECK-NEXT: [[T11:%.*]] = and <8 x i3> [[C]], [[NOTT9]]
-; CHECK-NEXT: [[T12:%.*]] = and <8 x i3> [[T9]], [[D:%.*]]
+; CHECK-NEXT: [[T12:%.*]] = and <8 x i3> [[D:%.*]], [[T9]]
; CHECK-NEXT: [[R:%.*]] = or disjoint <8 x i3> [[T11]], [[T12]]
; CHECK-NEXT: ret <8 x i3> [[R]]
;
@@ -831,8 +831,8 @@ define <2 x i64> @bitcast_fp_vec_cond(<2 x double> noundef %s, <2 x i64> %c, <2
; CHECK-LABEL: @bitcast_fp_vec_cond(
; CHECK-NEXT: [[T9:%.*]] = bitcast <2 x double> [[S:%.*]] to <2 x i64>
; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], <i64 -1, i64 -1>
-; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]]
-; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]]
+; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[C:%.*]], [[NOTT9]]
+; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[D:%.*]], [[T9]]
; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]]
; CHECK-NEXT: ret <2 x i64> [[R]]
;
@@ -851,8 +851,8 @@ define <2 x i64> @bitcast_int_vec_cond(i1 noundef %b, <2 x i64> %c, <2 x i64> %d
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i128
; CHECK-NEXT: [[T9:%.*]] = bitcast i128 [[S]] to <2 x i64>
; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], <i64 -1, i64 -1>
-; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]]
-; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]]
+; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[C:%.*]], [[NOTT9]]
+; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[D:%.*]], [[T9]]
; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]]
; CHECK-NEXT: ret <2 x i64> [[R]]
;
@@ -1126,7 +1126,7 @@ define i1 @not_d_bools_negative_use2(i1 %c, i1 %x, i1 %y) {
define i1 @logical_and_or_with_not_op(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @logical_and_or_with_not_op(
; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[NOT]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[B:%.*]], [[NOT]]
; CHECK-NEXT: [[AND:%.*]] = select i1 [[A:%.*]], i1 [[OR]], i1 false
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -1217,7 +1217,7 @@ define i1 @logical_and_or_with_common_not_op_variant5(i1 %a) {
define i1 @logical_or_and_with_not_op(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @logical_or_and_with_not_op(
; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[AND:%.*]] = and i1 [[NOT]], [[B:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[B:%.*]], [[NOT]]
; CHECK-NEXT: [[OR:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[AND]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -1325,9 +1325,9 @@ define i1 @reduce_logical_and2(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @reduce_logical_and2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[B:%.*]] = and i1 [[TMP0]], [[B1:%.*]]
-; CHECK-NEXT: [[AND3:%.*]] = select i1 [[AND2:%.*]], i1 [[B]], i1 false
-; CHECK-NEXT: ret i1 [[AND3]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[B:%.*]], [[TMP0]]
+; CHECK-NEXT: [[AND2:%.*]] = select i1 [[A:%.*]], i1 [[TMP1]], i1 false
+; CHECK-NEXT: ret i1 [[AND2]]
;
bb:
%or = xor i1 %c, %b
@@ -1373,9 +1373,9 @@ bb:
define i1 @reduce_logical_or2(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @reduce_logical_or2(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[B:%.*]] = or i1 [[C:%.*]], [[B1:%.*]]
-; CHECK-NEXT: [[AND3:%.*]] = select i1 [[AND2:%.*]], i1 true, i1 [[B]]
-; CHECK-NEXT: ret i1 [[AND3]]
+; CHECK-NEXT: [[TMP0:%.*]] = or i1 [[C:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[AND2:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[TMP0]]
+; CHECK-NEXT: ret i1 [[AND2]]
;
bb:
%or = xor i1 %c, %b
@@ -1493,7 +1493,7 @@ define i1 @reduce_bitwise_and1(i1 %a, i32 %b, i32 %c) {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[B:%.*]], 6
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[C:%.*]], [[B]]
-; CHECK-NEXT: [[AND1:%.*]] = or i1 [[CMP1]], [[A:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = or i1 [[A:%.*]], [[CMP1]]
; CHECK-NEXT: [[AND2:%.*]] = and i1 [[AND1]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND2]]
;
diff --git a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll
index 5d058b20be720..89522a00d7894 100644
--- a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll
+++ b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll
@@ -252,7 +252,7 @@ define i1 @scalar_i32_lshr_and_negC_eq_nonzero(i32 %x, i32 %y) {
define i1 @scalar_i8_lshr_and_negC_eq_not_negatedPowerOf2(i8 %x, i8 %y) {
; CHECK-LABEL: @scalar_i8_lshr_and_negC_eq_not_negatedPowerOf2(
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 -3, [[Y:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index fa92c1c4b3be4..4d7a0a28c3cea 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -1284,7 +1284,7 @@ define i2 @bool_add_lshr(i1 %a, i1 %b) {
define i4 @not_bool_add_lshr(i2 %a, i2 %b) {
; CHECK-LABEL: @not_bool_add_lshr(
; CHECK-NEXT: [[TMP1:%.*]] = xor i2 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i2 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i2 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i4
; CHECK-NEXT: ret i4 [[LSHR]]
;
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-add.ll b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
index 0484369e99d6a..5ef53ad515013 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-add.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-add.ll
@@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
@@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m)
; CHECK-LABEL: @p_vec_poison(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 poison, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
@@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) {
; CHECK-LABEL: @n2_badmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll b/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll
index dc76743c565ed..639478dfcc6fe 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll
@@ -17,7 +17,7 @@
define i32 @p(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: ret i32 [[RET]]
@@ -32,7 +32,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M:%.*]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or <2 x i32> [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[OR]], [[OR1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
@@ -125,7 +125,7 @@ declare i32 @gen32()
define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: ret i32 [[RET]]
@@ -141,8 +141,8 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -157,7 +157,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: ret i32 [[RET]]
@@ -173,8 +173,8 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -189,7 +189,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: ret i32 [[RET]]
@@ -205,8 +205,8 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -222,8 +222,8 @@ define i32 @p_commutative6(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -259,7 +259,7 @@ declare void @use32(i32)
define i32 @n0_oneuse_of_neg_is_ok_0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_of_neg_is_ok_0(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -276,7 +276,7 @@ define i32 @n0_oneuse_of_neg_is_ok_0(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_1(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_1(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[OR]])
@@ -293,7 +293,7 @@ define i32 @n0_oneuse_1(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_2(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_2(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[OR1]])
@@ -310,7 +310,7 @@ define i32 @n0_oneuse_2(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_3(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_3(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -329,7 +329,7 @@ define i32 @n0_oneuse_3(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_4(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_4(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -348,7 +348,7 @@ define i32 @n0_oneuse_4(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_5(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_5(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -369,7 +369,7 @@ define i32 @n0_oneuse_5(i32 %x, i32 %y, i32 %m) {
define i32 @n0_oneuse_6(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse_6(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: call void @use32(i32 [[OR]])
@@ -456,7 +456,7 @@ define i32 @n1_badxor(i32 %x, i32 %y, i32 %m) {
define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) {
; CHECK-LABEL: @n2_badmask(
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M1:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]]
; CHECK-NEXT: ret i32 [[RET]]
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
index 0531a532fc7e0..dd2ac6dfe5109 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll
@@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
@@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m)
; CHECK-LABEL: @p_vec_poison(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 poison, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
@@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) {
; CHECK-LABEL: @n2_badmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
index 74cc7625aebff..7ed1f3fdfdab6 100644
--- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
+++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll
@@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
@@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m)
; CHECK-LABEL: @p_vec_poison(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 poison, i32 -1>
-; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
@@ -84,8 +84,8 @@ define i32 @p_constmask(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]]
-; CHECK-NEXT: ret i32 [[RET1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
+; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, 65280
%and1 = and i32 %y, -65281
@@ -97,8 +97,8 @@ define <2 x i32> @p_constmask_splatvec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 65280, i32 65280>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -65281>
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
-; CHECK-NEXT: ret <2 x i32> [[RET1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
+; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%and = and <2 x i32> %x, <i32 65280, i32 65280>
%and1 = and <2 x i32> %y, <i32 -65281, i32 -65281>
@@ -140,8 +140,8 @@ define i32 @p_constmask2(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 61440
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]]
-; CHECK-NEXT: ret i32 [[RET1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
+; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, 61440
%and1 = and i32 %y, -65281
@@ -153,8 +153,8 @@ define <2 x i32> @p_constmask2_splatvec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask2_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 61440, i32 61440>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -65281>
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
-; CHECK-NEXT: ret <2 x i32> [[RET1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]]
+; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%and = and <2 x i32> %x, <i32 61440, i32 61440>
%and1 = and <2 x i32> %y, <i32 -65281, i32 -65281>
@@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
@@ -312,8 +312,8 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask_commutative(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND1]], [[AND]]
-; CHECK-NEXT: ret i32 [[RET1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]]
+; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, 65280
%and1 = and i32 %y, -65281
@@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[NEG]])
@@ -354,10 +354,10 @@ define i32 @n0_constmask_oneuse(i32 %x, i32 %y) {
; CHECK-LABEL: @n0_constmask_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
-; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]]
+; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[AND1]])
-; CHECK-NEXT: ret i32 [[RET1]]
+; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, 65280
%and1 = and i32 %y, -65281
@@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) {
; CHECK-LABEL: @n2_badmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index 3e870c695cf1a..26cd4996e687d 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -99,7 +99,7 @@ define i32 @t8(i64 %a, i32 %b) {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.smin.i64(i64 [[A:%.*]], i64 -32767)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i32 [[B:%.*]], 42
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[B]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[B]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP3]], i1 true, i1 [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; CHECK-NEXT: ret i32 [[TMP6]]
@@ -1360,11 +1360,11 @@ define i8 @PR14613_smax(i8 %x) {
define i8 @PR46271(<2 x i8> %x) {
; CHECK-LABEL: @PR46271(
-; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i8> [[X:%.*]], <i8 poison, i8 -1>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], <i8 poison, i8 -1>
; CHECK-NEXT: [[A_INV:%.*]] = icmp slt <2 x i8> [[X]], zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[A_INV]], <2 x i8> <i8 poison, i8 0>, <2 x i8> [[TMP3]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i64 1
-; CHECK-NEXT: ret i8 [[TMP2]]
+; CHECK-NEXT: [[NOT:%.*]] = select <2 x i1> [[A_INV]], <2 x i8> <i8 poison, i8 0>, <2 x i8> [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = extractelement <2 x i8> [[NOT]], i64 1
+; CHECK-NEXT: ret i8 [[R]]
;
%a = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
%b = select <2 x i1> %a, <2 x i8> %x, <2 x i8> <i8 poison, i8 -1>
diff --git a/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll b/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll
index b8430da451f9a..8b896632b8adc 100644
--- a/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll
@@ -76,8 +76,8 @@ define i8 @smin_xor_Cpow2_neg(i8 %x) {
define i8 @umax_xor_pow2(i8 %x, i8 %y) {
; CHECK-LABEL: @umax_xor_pow2(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[YP2:%.*]] = and i8 [[NY]], [[Y]]
-; CHECK-NEXT: [[R:%.*]] = or i8 [[YP2]], [[X:%.*]]
+; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[X:%.*]], [[YP2]]
; CHECK-NEXT: ret i8 [[R]]
;
%ny = sub i8 0, %y
@@ -90,9 +90,9 @@ define i8 @umax_xor_pow2(i8 %x, i8 %y) {
define <2 x i8> @umin_xor_pow2(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @umin_xor_pow2(
; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]]
-; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[NY]], [[Y]]
+; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[Y]], [[NY]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[YP2]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%ny = sub <2 x i8> <i8 0, i8 0>, %y
@@ -105,8 +105,8 @@ define <2 x i8> @umin_xor_pow2(<2 x i8> %x, <2 x i8> %y) {
define i8 @smax_xor_pow2_unk(i8 %x, i8 %y) {
; CHECK-LABEL: @smax_xor_pow2_unk(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[YP2:%.*]] = and i8 [[NY]], [[Y]]
-; CHECK-NEXT: [[X_XOR:%.*]] = xor i8 [[YP2]], [[X:%.*]]
+; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]]
+; CHECK-NEXT: [[X_XOR:%.*]] = xor i8 [[X:%.*]], [[YP2]]
; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smax.i8(i8 [[X]], i8 [[X_XOR]])
; CHECK-NEXT: ret i8 [[R]]
;
@@ -120,8 +120,8 @@ define i8 @smax_xor_pow2_unk(i8 %x, i8 %y) {
define <2 x i8> @smin_xor_pow2_unk(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @smin_xor_pow2_unk(
; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]]
-; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[NY]], [[Y]]
-; CHECK-NEXT: [[X_XOR:%.*]] = xor <2 x i8> [[YP2]], [[X:%.*]]
+; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[Y]], [[NY]]
+; CHECK-NEXT: [[X_XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[YP2]]
; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.smin.v2i8(<2 x i8> [[X]], <2 x i8> [[X_XOR]])
; CHECK-NEXT: ret <2 x i8> [[R]]
;
@@ -159,12 +159,12 @@ pos:
define i8 @smin_xor_pow2_pos(i8 %x, i8 %y) {
; CHECK-LABEL: @smin_xor_pow2_pos(
; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[YP2:%.*]] = and i8 [[NY]], [[Y]]
+; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[YP2]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[NEG:%.*]], label [[POS:%.*]]
; CHECK: neg:
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[YP2]], -1
-; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[R]]
; CHECK: pos:
; CHECK-NEXT: call void @barrier()
diff --git a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
index e940ae3fec163..fd8ad88764f59 100644
--- a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
+++ b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
@@ -182,7 +182,7 @@ define i33 @squared_demanded_3_low_bits(i33 %x) {
define i64 @scalar_mul_bit_x0_y0(i64 %x, i64 %y) {
; CHECK-LABEL: @scalar_mul_bit_x0_y0(
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 1
-; CHECK-NEXT: [[MUL:%.*]] = and i64 [[AND2]], [[X:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = and i64 [[X:%.*]], [[AND2]]
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
@@ -199,7 +199,7 @@ define i64 @scalar_mul_bit_x0_y0_uses(i64 %x, i64 %y) {
; CHECK-NEXT: call void @use(i64 [[AND1]])
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 1
; CHECK-NEXT: call void @use(i64 [[AND2]])
-; CHECK-NEXT: [[MUL:%.*]] = and i64 [[AND2]], [[X]]
+; CHECK-NEXT: [[MUL:%.*]] = and i64 [[X]], [[AND2]]
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
@@ -241,7 +241,7 @@ define i64 @scalar_mul_bit_x0_yC(i64 %x, i64 %y, i64 %c) {
define <2 x i64> @vector_mul_bit_x0_y0(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @vector_mul_bit_x0_y0(
; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[Y:%.*]], <i64 1, i64 1>
-; CHECK-NEXT: [[MUL:%.*]] = and <2 x i64> [[AND2]], [[X:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = and <2 x i64> [[X:%.*]], [[AND2]]
; CHECK-NEXT: ret <2 x i64> [[MUL]]
;
%and1 = and <2 x i64> %x, <i64 1, i64 1>
diff --git a/llvm/test/Transforms/InstCombine/mul-pow2.ll b/llvm/test/Transforms/InstCombine/mul-pow2.ll
index c16fd710f309b..bc172f0152fe5 100644
--- a/llvm/test/Transforms/InstCombine/mul-pow2.ll
+++ b/llvm/test/Transforms/InstCombine/mul-pow2.ll
@@ -107,7 +107,7 @@ define <2 x i8> @mul_x_selectp2_vec(<2 x i8> %xx, i1 %c) {
define i8 @shl_add_log_may_cause_poison_pr62175_fail(i8 %x, i8 %y) {
; CHECK-LABEL: @shl_add_log_may_cause_poison_pr62175_fail(
; CHECK-NEXT: [[SHL:%.*]] = shl i8 4, [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[Y:%.*]], [[SHL]]
; CHECK-NEXT: ret i8 [[MUL]]
;
%shl = shl i8 4, %x
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index 4fb3c0b1ad491..c2b9cf37c217f 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -289,7 +289,7 @@ define i32 @shl1_decrement_use(i32 %x, i32 %y) {
; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[X:%.*]]
; CHECK-NEXT: [[X1:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT: call void @use32(i32 [[X1]])
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[X1]], [[Y:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y:%.*]], [[X1]]
; CHECK-NEXT: ret i32 [[M]]
;
%pow2x = shl i32 1, %x
@@ -1411,7 +1411,7 @@ define i32 @mul_nsw_shl_nsw_neg_onearg(i32 %x) {
define i32 @mul_use_mul_neg(i32 %x,i32 %y) {
; CHECK-LABEL: @mul_use_mul_neg(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use32(i32 [[MUL]])
; CHECK-NEXT: [[MUL2:%.*]] = mul i32 [[MUL]], [[NEG]]
; CHECK-NEXT: ret i32 [[MUL2]]
@@ -2119,7 +2119,7 @@ define i32 @test_mul_sext_bool_commuted(i1 %x, i32 %y) {
define i32 @test_mul_sext_nonbool(i2 %x, i32 %y) {
; CHECK-LABEL: @test_mul_sext_nonbool(
; CHECK-NEXT: [[SEXT:%.*]] = sext i2 [[X:%.*]] to i32
-; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[SEXT]]
; CHECK-NEXT: ret i32 [[MUL]]
;
%sext = sext i2 %x to i32
@@ -2131,7 +2131,7 @@ define i32 @test_mul_sext_multiuse(i1 %x, i32 %y) {
; CHECK-LABEL: @test_mul_sext_multiuse(
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[X:%.*]] to i32
; CHECK-NEXT: tail call void @use(i32 [[SEXT]])
-; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[SEXT]]
; CHECK-NEXT: ret i32 [[MUL]]
;
%sext = sext i1 %x to i32
diff --git a/llvm/test/Transforms/InstCombine/mul_fold.ll b/llvm/test/Transforms/InstCombine/mul_fold.ll
index a1fdec3c68cc4..e4a21db8a6ece 100644
--- a/llvm/test/Transforms/InstCombine/mul_fold.ll
+++ b/llvm/test/Transforms/InstCombine/mul_fold.ll
@@ -55,7 +55,7 @@ define i8 @mul8_low_A0_B1(i8 %p, i8 %in1) {
define i8 @mul8_low_A0_B2(i8 %in0, i8 %p) {
; CHECK-LABEL: @mul8_low_A0_B2(
; CHECK-NEXT: [[IN1:%.*]] = call i8 @use8(i8 [[P:%.*]])
-; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN1]], [[IN0:%.*]]
+; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0:%.*]], [[IN1]]
; CHECK-NEXT: ret i8 [[RETLO]]
;
@@ -262,7 +262,7 @@ define i32 @mul32_low_A2_B2(i32 %in0, i32 %p) {
; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN0LO]], [[IN1HI]]
; CHECK-NEXT: call void @use32(i32 [[M10]])
-; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]]
+; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]]
; CHECK-NEXT: ret i32 [[RETLO]]
;
%in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization
@@ -287,7 +287,7 @@ define i32 @mul32_low_A2_B3(i32 %in0, i32 %p) {
; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]]
; CHECK-NEXT: call void @use32(i32 [[M10]])
-; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]]
+; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]]
; CHECK-NEXT: ret i32 [[RETLO]]
;
%in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization
@@ -639,7 +639,7 @@ define i64 @mul64_low_no_and(i64 %in0, i64 %in1) {
; CHECK-NEXT: [[IN0HI:%.*]] = lshr i64 [[IN0:%.*]], 32
; CHECK-NEXT: [[IN1HI:%.*]] = lshr i64 [[IN1:%.*]], 32
; CHECK-NEXT: [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]]
-; CHECK-NEXT: [[M01:%.*]] = mul i64 [[IN0HI]], [[IN1]]
+; CHECK-NEXT: [[M01:%.*]] = mul i64 [[IN1]], [[IN0HI]]
; CHECK-NEXT: [[M00:%.*]] = mul i64 [[IN1]], [[IN0]]
; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M10]], [[M01]]
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32
@@ -719,7 +719,7 @@ define i32 @mul32_low_extra_shl_use(i32 %in0, i32 %in1) {
; CHECK-NEXT: [[IN0HI:%.*]] = lshr i32 [[IN0:%.*]], 16
; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1:%.*]], 16
; CHECK-NEXT: [[M10:%.*]] = mul i32 [[IN1HI]], [[IN0]]
-; CHECK-NEXT: [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]]
+; CHECK-NEXT: [[M01:%.*]] = mul i32 [[IN1]], [[IN0HI]]
; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]]
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16
; CHECK-NEXT: call void @use32(i32 [[SHL]])
@@ -738,4 +738,4 @@ define i32 @mul32_low_extra_shl_use(i32 %in0, i32 %in1) {
call void @use32(i32 %shl)
%retLo = add i32 %shl, %m00
ret i32 %retLo
-}
\ No newline at end of file
+}
diff --git a/llvm/test/Transforms/InstCombine/mul_full_64.ll b/llvm/test/Transforms/InstCombine/mul_full_64.ll
index 7cddb63b9ba63..1bec5bb927604 100644
--- a/llvm/test/Transforms/InstCombine/mul_full_64.ll
+++ b/llvm/test/Transforms/InstCombine/mul_full_64.ll
@@ -459,7 +459,7 @@ define i64 @mullo(i64 %x, i64 %y) {
; CHECK-NEXT: [[YL:%.*]] = and i64 [[Y:%.*]], 4294967295
; CHECK-NEXT: [[YH:%.*]] = lshr i64 [[Y]], 32
; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[YL]], [[XL]]
-; CHECK-NEXT: [[T1:%.*]] = mul i64 [[XH]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = mul i64 [[Y]], [[XH]]
; CHECK-NEXT: [[T2:%.*]] = mul i64 [[YH]], [[X]]
; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295
; CHECK-NEXT: [[T0H:%.*]] = lshr i64 [[T0]], 32
@@ -526,7 +526,7 @@ define i64 @mullo_duplicate(i64 %x, i64 %y) {
; CHECK-NEXT: [[YL:%.*]] = and i64 [[Y]], 4294967295
; CHECK-NEXT: [[YH:%.*]] = lshr i64 [[Y]], 32
; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[YL]], [[XL]]
-; CHECK-NEXT: [[T1:%.*]] = mul i64 [[XH]], [[Y]]
+; CHECK-NEXT: [[T1:%.*]] = mul i64 [[Y]], [[XH]]
; CHECK-NEXT: [[T2:%.*]] = mul i64 [[YH]], [[X]]
; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295
; CHECK-NEXT: [[T0H:%.*]] = lshr i64 [[T0]], 32
diff --git a/llvm/test/Transforms/InstCombine/not-add.ll b/llvm/test/Transforms/InstCombine/not-add.ll
index 9ba37b6bba39e..aef618eeefd20 100644
--- a/llvm/test/Transforms/InstCombine/not-add.ll
+++ b/llvm/test/Transforms/InstCombine/not-add.ll
@@ -42,7 +42,7 @@ define i8 @basic_use_xor(i8 %x, i8 %y) {
define i8 @basic_use_add(i8 %x, i8 %y) {
; CHECK-LABEL: @basic_use_add(
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[A:%.*]] = add i8 [[NOTX]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[NOTX]]
; CHECK-NEXT: call void @use(i8 [[A]])
; CHECK-NEXT: [[NOTA:%.*]] = sub i8 [[X]], [[Y]]
; CHECK-NEXT: ret i8 [[NOTA]]
@@ -58,7 +58,7 @@ define i8 @basic_use_both(i8 %x, i8 %y) {
; CHECK-LABEL: @basic_use_both(
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use(i8 [[NOTX]])
-; CHECK-NEXT: [[A:%.*]] = add i8 [[NOTX]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[NOTX]]
; CHECK-NEXT: call void @use(i8 [[A]])
; CHECK-NEXT: [[NOTA:%.*]] = sub i8 [[X]], [[Y]]
; CHECK-NEXT: ret i8 [[NOTA]]
@@ -143,8 +143,8 @@ define i32 @pr50308(i1 %c1, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C1:%.*]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]]
; CHECK: cond.true:
-; CHECK-NEXT: [[ADD_NOT:%.*]] = sub i32 -2, [[V1:%.*]]
-; CHECK-NEXT: [[ADD1_NEG:%.*]] = xor i32 [[ADD_NOT]], [[V2:%.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i32 -2, [[V1:%.*]]
+; CHECK-NEXT: [[ADD1_NEG:%.*]] = xor i32 [[TMP0]], [[V2:%.*]]
; CHECK-NEXT: br label [[COND_END]]
; CHECK: cond.end:
; CHECK-NEXT: [[COND_NEG:%.*]] = phi i32 [ [[ADD1_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index 0c2c6195e3240..3679976d9dc39 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -442,7 +442,7 @@ define i8 @not_or_neg_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @not_or_neg_use1(
; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[S]])
-; CHECK-NEXT: [[O:%.*]] = or i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[O:%.*]] = or i8 [[X:%.*]], [[S]]
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[O]], -1
; CHECK-NEXT: ret i8 [[NOT]]
;
@@ -458,7 +458,7 @@ define i8 @not_or_neg_use1(i8 %x, i8 %y) {
define i8 @not_or_neg_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @not_or_neg_use2(
; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[O:%.*]] = or i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[O:%.*]] = or i8 [[X:%.*]], [[S]]
; CHECK-NEXT: call void @use8(i8 [[O]])
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[O]], -1
; CHECK-NEXT: ret i8 [[NOT]]
@@ -850,7 +850,7 @@ define i32 @test_zext(i32 %a, i32 %b){
; CHECK-LABEL: @test_zext(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 0
; CHECK-NEXT: [[SEXT:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SEXT]], [[B:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[B:%.*]], [[SEXT]]
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[ADD]], -1
; CHECK-NEXT: ret i32 [[NOT]]
;
@@ -864,11 +864,11 @@ define i32 @test_zext(i32 %a, i32 %b){
define void @test_invert_demorgan_or(i32 %a, i32 %b, i1 %cond) {
; CHECK-LABEL: @test_invert_demorgan_or(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0
-; CHECK-NEXT: [[OR_NOT1:%.*]] = and i1 [[CMP2]], [[CMP3]]
-; CHECK-NEXT: [[MERGE:%.*]] = and i1 [[OR_NOT1]], [[COND:%.*]]
-; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: [[OR_NOT1:%.*]] = and i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT: [[MERGE_NOT:%.*]] = and i1 [[OR_NOT1]], [[COND:%.*]]
+; CHECK-NEXT: br i1 [[MERGE_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: call void @f1()
; CHECK-NEXT: unreachable
@@ -897,8 +897,8 @@ define i1 @test_invert_demorgan_or2(i64 %a, i64 %b, i64 %c) {
; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[B:%.*]], 60
; CHECK-NEXT: [[OR1_NOT1:%.*]] = and i1 [[CMP1]], [[CMP2]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[C:%.*]], 60
-; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR1_NOT1]], [[CMP3]]
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[OR2_NOT:%.*]] = and i1 [[OR1_NOT1]], [[CMP3]]
+; CHECK-NEXT: ret i1 [[OR2_NOT]]
;
%cmp1 = icmp ugt i64 %a, 23
%cmp2 = icmp ugt i64 %b, 59
@@ -920,8 +920,8 @@ define i1 @test_invert_demorgan_or3(i32 %a, i32 %b) {
; CHECK-NEXT: [[CMP4:%.*]] = icmp ult i32 [[TMP3]], -196112
; CHECK-NEXT: [[OR1_NOT2:%.*]] = and i1 [[CMP1]], [[CMP2]]
; CHECK-NEXT: [[OR2_NOT1:%.*]] = and i1 [[OR1_NOT2]], [[CMP3]]
-; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR2_NOT1]], [[CMP4]]
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[OR3_NOT:%.*]] = and i1 [[OR2_NOT1]], [[CMP4]]
+; CHECK-NEXT: ret i1 [[OR3_NOT]]
;
%cmp1 = icmp eq i32 %a, 178206
%v1 = add i32 %b, -195102
@@ -943,8 +943,8 @@ define i1 @test_invert_demorgan_logical_or(i64 %x, i64 %y) {
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0
; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 [[CMP2]], i1 false
; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0
-; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[OR_NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
+; CHECK-NEXT: ret i1 [[OR_NOT]]
;
%cmp1 = icmp eq i64 %x, 27
%cmp2 = icmp eq i64 %y, 0
@@ -958,11 +958,11 @@ define i1 @test_invert_demorgan_logical_or(i64 %x, i64 %y) {
define i1 @test_invert_demorgan_and(i32 %a, i32 %b, i1 %cond) {
; CHECK-LABEL: @test_invert_demorgan_and(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0
-; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0
-; CHECK-NEXT: [[AND_NOT1:%.*]] = or i1 [[CMP2]], [[CMP3]]
-; CHECK-NEXT: [[MERGE:%.*]] = or i1 [[AND_NOT1]], [[COND:%.*]]
-; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B:%.*]], 0
+; CHECK-NEXT: [[AND_NOT1:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT: [[MERGE_NOT:%.*]] = or i1 [[AND_NOT1]], [[COND:%.*]]
+; CHECK-NEXT: br i1 [[MERGE_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: call void @f1()
; CHECK-NEXT: unreachable
@@ -999,9 +999,9 @@ define i64 @test_invert_demorgan_and2(i64 %x) {
define i1 @test_invert_demorgan_and3(i32 %a, i32 %b) {
; CHECK-LABEL: @test_invert_demorgan_and3(
-; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[ADD]], 4095
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 4095
+; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 4095
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP2]], 4095
; CHECK-NEXT: ret i1 [[CMP]]
;
%not = xor i32 %a, -1
@@ -1017,8 +1017,8 @@ define i1 @test_invert_demorgan_logical_and(i64 %x, i64 %y) {
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0
; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP2]]
; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0
-; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
-; CHECK-NEXT: ret i1 [[NOT]]
+; CHECK-NEXT: [[OR_NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]]
+; CHECK-NEXT: ret i1 [[OR_NOT]]
;
%cmp1 = icmp eq i64 %x, 27
%cmp2 = icmp eq i64 %y, 0
diff --git a/llvm/test/Transforms/InstCombine/onehot_merge.ll b/llvm/test/Transforms/InstCombine/onehot_merge.ll
index 228ad233c9763..d8ef66a4dd781 100644
--- a/llvm/test/Transforms/InstCombine/onehot_merge.ll
+++ b/llvm/test/Transforms/InstCombine/onehot_merge.ll
@@ -48,7 +48,7 @@ define i1 @foo1_and(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -68,7 +68,7 @@ define i1 @foo1_and_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -87,7 +87,7 @@ define <2 x i1> @foo1_and_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
@@ -213,7 +213,7 @@ define i1 @foo1_or(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -233,7 +233,7 @@ define i1 @foo1_or_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -252,7 +252,7 @@ define <2 x i1> @foo1_or_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
@@ -336,7 +336,7 @@ define i1 @foo1_and_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -356,7 +356,7 @@ define i1 @foo1_and_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -375,7 +375,7 @@ define <2 x i1> @foo1_and_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x
; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = lshr exact <2 x i32> <i32 -2147483648, i32 -2147483648>, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
@@ -394,7 +394,7 @@ define i1 @foo1_or_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -414,7 +414,7 @@ define i1 @foo1_or_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) {
; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -433,7 +433,7 @@ define <2 x i1> @foo1_or_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i
; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, [[C1:%.*]]
; CHECK-NEXT: [[T4:%.*]] = lshr exact <2 x i32> <i32 -2147483648, i32 -2147483648>, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[OR]]
;
@@ -618,7 +618,7 @@ define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -640,7 +640,7 @@ define i1 @foo1_and_extra_use_shl_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -663,7 +663,7 @@ define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -686,7 +686,7 @@ define i1 @foo1_and_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -710,7 +710,7 @@ define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T3:%.*]] = icmp eq i32 [[T2]], 0
; CHECK-NEXT: store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -734,7 +734,7 @@ define i1 @foo1_and_extra_use_cmp_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: store i1 [[T3]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -756,7 +756,7 @@ define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]]
; CHECK-NEXT: store i32 [[T1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -778,7 +778,7 @@ define i1 @foo1_and_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]]
; CHECK-NEXT: store i32 [[TMP1]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -801,7 +801,7 @@ define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -824,7 +824,7 @@ define i1 @foo1_and_extra_use_and2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -848,7 +848,7 @@ define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0
; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
@@ -872,7 +872,7 @@ define i1 @foo1_and_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) {
; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0
; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]]
; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i1 [[OR]]
;
diff --git a/llvm/test/Transforms/InstCombine/or-xor-xor.ll b/llvm/test/Transforms/InstCombine/or-xor-xor.ll
index 327d5f8d6220a..c3f1aedb1879a 100644
--- a/llvm/test/Transforms/InstCombine/or-xor-xor.ll
+++ b/llvm/test/Transforms/InstCombine/or-xor-xor.ll
@@ -98,7 +98,7 @@ define i3 @or_xor_xor_normal_multiple_uses_and(i3 %a, i3 %b) {
define i32 @or_xor_xor_negative_multiple_uses_xor1(i32 %a, i32 %b) {
; CHECK-LABEL: @or_xor_xor_negative_multiple_uses_xor1(
; CHECK-NEXT: [[AND1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[XOR1:%.*]] = and i32 [[AND1]], [[B:%.*]]
+; CHECK-NEXT: [[XOR1:%.*]] = and i32 [[B:%.*]], [[AND1]]
; CHECK-NEXT: call void @use.i32(i32 [[XOR1]])
; CHECK-NEXT: [[OR:%.*]] = xor i32 [[A]], [[B]]
; CHECK-NEXT: ret i32 [[OR]]
@@ -114,7 +114,7 @@ define i32 @or_xor_xor_negative_multiple_uses_xor1(i32 %a, i32 %b) {
define i5 @or_xor_xor_negative_multiple_uses_xor2(i5 %a, i5 %b) {
; CHECK-LABEL: @or_xor_xor_negative_multiple_uses_xor2(
; CHECK-NEXT: [[A1:%.*]] = xor i5 [[B:%.*]], -1
-; CHECK-NEXT: [[XOR2:%.*]] = and i5 [[A1]], [[A:%.*]]
+; CHECK-NEXT: [[XOR2:%.*]] = and i5 [[A:%.*]], [[A1]]
; CHECK-NEXT: call void @use.i5(i5 [[XOR2]])
; CHECK-NEXT: [[OR:%.*]] = xor i5 [[A]], [[B]]
; CHECK-NEXT: ret i5 [[OR]]
diff --git a/llvm/test/Transforms/InstCombine/or-xor.ll b/llvm/test/Transforms/InstCombine/or-xor.ll
index cf6b9000182d2..f4ddbb5abc463 100644
--- a/llvm/test/Transforms/InstCombine/or-xor.ll
+++ b/llvm/test/Transforms/InstCombine/or-xor.ll
@@ -8,7 +8,7 @@ declare void @use(i8)
define i32 @test1(i32 %x, i32 %y) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%or = or i32 %x, %y
@@ -23,7 +23,7 @@ define i32 @test1(i32 %x, i32 %y) {
define i32 @test2(i32 %x, i32 %y) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%or = or i32 %x, %y
@@ -37,7 +37,7 @@ define i32 @test2(i32 %x, i32 %y) {
define i32 @test3(i32 %x, i32 %y) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%xor = xor i32 %x, %y
@@ -52,7 +52,7 @@ define i32 @test3(i32 %x, i32 %y) {
define i32 @test4(i32 %x, i32 %y) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%xor = xor i32 %x, %y
@@ -206,7 +206,7 @@ define i8 @xor_common_op_commute3(i8 %p, i8 %q) {
define i32 @test8(i32 %x, i32 %y) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%not = xor i32 %y, -1
@@ -218,7 +218,7 @@ define i32 @test8(i32 %x, i32 %y) {
define i32 @test9(i32 %x, i32 %y) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1
-; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[Z]]
;
%not = xor i32 %x, -1
@@ -306,7 +306,7 @@ define i32 @test10_canonical(i32 %A, i32 %B) {
; (x | y) & ((~x) ^ y) -> (x & y)
define i32 @test11(i32 %x, i32 %y) {
; CHECK-LABEL: @test11(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[AND]]
;
%or = or i32 %x, %y
@@ -319,7 +319,7 @@ define i32 @test11(i32 %x, i32 %y) {
; ((~x) ^ y) & (x | y) -> (x & y)
define i32 @test12(i32 %x, i32 %y) {
; CHECK-LABEL: @test12(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[AND]]
;
%neg = xor i32 %x, -1
@@ -331,7 +331,7 @@ define i32 @test12(i32 %x, i32 %y) {
define i32 @test12_commuted(i32 %x, i32 %y) {
; CHECK-LABEL: @test12_commuted(
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[AND]]
;
%neg = xor i32 %x, -1
@@ -344,7 +344,7 @@ define i32 @test12_commuted(i32 %x, i32 %y) {
; ((x | y) ^ (x ^ y)) -> (x & y)
define i32 @test13(i32 %x, i32 %y) {
; CHECK-LABEL: @test13(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = xor i32 %y, %x
@@ -800,7 +800,7 @@ define i4 @or_not_xor_common_op_commute0(i4 %x, i4 %y, i4 %z) {
; CHECK-LABEL: @or_not_xor_common_op_commute0(
; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[NAND:%.*]] = xor i4 [[TMP1]], -1
-; CHECK-NEXT: [[O2:%.*]] = or i4 [[NAND]], [[Z:%.*]]
+; CHECK-NEXT: [[O2:%.*]] = or i4 [[Z:%.*]], [[NAND]]
; CHECK-NEXT: ret i4 [[O2]]
;
%notx = xor i4 %x, -1
@@ -816,7 +816,7 @@ define i8 @or_not_xor_common_op_commute1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: call void @use(i8 [[NOTX]])
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y:%.*]]
; CHECK-NEXT: [[NAND:%.*]] = xor i8 [[TMP1]], -1
-; CHECK-NEXT: [[O2:%.*]] = or i8 [[NAND]], [[Z:%.*]]
+; CHECK-NEXT: [[O2:%.*]] = or i8 [[Z:%.*]], [[NAND]]
; CHECK-NEXT: ret i8 [[O2]]
;
%notx = xor i8 %x, -1
@@ -863,7 +863,7 @@ define <2 x i4> @or_not_xor_common_op_commute4(<2 x i4> %x, <2 x i4> %y, <2 x i4
; CHECK-LABEL: @or_not_xor_common_op_commute4(
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NAND:%.*]] = xor <2 x i4> [[TMP1]], <i4 -1, i4 -1>
-; CHECK-NEXT: [[O2:%.*]] = or <2 x i4> [[NAND]], [[Z:%.*]]
+; CHECK-NEXT: [[O2:%.*]] = or <2 x i4> [[Z:%.*]], [[NAND]]
; CHECK-NEXT: ret <2 x i4> [[O2]]
;
%notx = xor <2 x i4> %x, <i4 -1, i4 -1>
@@ -877,7 +877,7 @@ define i8 @or_not_xor_common_op_commute5(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @or_not_xor_common_op_commute5(
; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NAND:%.*]] = xor i8 [[TMP1]], -1
-; CHECK-NEXT: [[O2:%.*]] = or i8 [[NAND]], [[Z:%.*]]
+; CHECK-NEXT: [[O2:%.*]] = or i8 [[Z:%.*]], [[NAND]]
; CHECK-NEXT: ret i8 [[O2]]
;
%notx = xor i8 %x, -1
@@ -926,7 +926,7 @@ define i8 @or_not_xor_common_op_use1(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[Y:%.*]]
; CHECK-NEXT: call void @use(i8 [[XOR]])
-; CHECK-NEXT: [[O1:%.*]] = or i8 [[NOTX]], [[Z:%.*]]
+; CHECK-NEXT: [[O1:%.*]] = or i8 [[Z:%.*]], [[NOTX]]
; CHECK-NEXT: [[O2:%.*]] = or i8 [[XOR]], [[O1]]
; CHECK-NEXT: ret i8 [[O2]]
;
@@ -944,7 +944,7 @@ define i8 @or_not_xor_common_op_use2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @or_not_xor_common_op_use2(
; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[O1:%.*]] = or i8 [[NOTX]], [[Z:%.*]]
+; CHECK-NEXT: [[O1:%.*]] = or i8 [[Z:%.*]], [[NOTX]]
; CHECK-NEXT: call void @use(i8 [[O1]])
; CHECK-NEXT: [[O2:%.*]] = or i8 [[XOR]], [[O1]]
; CHECK-NEXT: ret i8 [[O2]]
@@ -1098,7 +1098,7 @@ define i32 @PR75692_3(i32 %x, i32 %y) {
define i32 @or_xor_not(i32 %x, i32 %y) {
; CHECK-LABEL: @or_xor_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[OR1]]
;
%not = xor i32 %y, -1
@@ -1112,7 +1112,7 @@ define i32 @or_xor_not_uses1(i32 %x, i32 %y) {
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: call void @use(i32 [[NOT]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i32 [[OR1]]
;
%not = xor i32 %y, -1
@@ -1127,7 +1127,7 @@ define i32 @or_xor_not_uses2(i32 %x, i32 %y) {
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: call void @use(i32 [[XOR]])
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[XOR]], [[Y]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[XOR]]
; CHECK-NEXT: ret i32 [[OR1]]
;
%not = xor i32 %y, -1
diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll
index 6e2085a8bb6c7..9bcad034b363e 100644
--- a/llvm/test/Transforms/InstCombine/or.ll
+++ b/llvm/test/Transforms/InstCombine/or.ll
@@ -696,7 +696,7 @@ define i32 @test39d(i32 %a, float %b) {
define i32 @test40(i32 %a, i32 %b) {
; CHECK-LABEL: @test40(
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]]
; CHECK-NEXT: ret i32 [[OR]]
;
%and = and i32 %a, %b
@@ -708,7 +708,7 @@ define i32 @test40(i32 %a, i32 %b) {
define i32 @test40b(i32 %a, i32 %b) {
; CHECK-LABEL: @test40b(
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]]
; CHECK-NEXT: ret i32 [[OR]]
;
%and = and i32 %b, %a
@@ -720,7 +720,7 @@ define i32 @test40b(i32 %a, i32 %b) {
define i32 @test40c(i32 %a, i32 %b) {
; CHECK-LABEL: @test40c(
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]]
; CHECK-NEXT: ret i32 [[OR]]
;
%and = and i32 %b, %a
@@ -732,7 +732,7 @@ define i32 @test40c(i32 %a, i32 %b) {
define i32 @test40d(i32 %a, i32 %b) {
; CHECK-LABEL: @test40d(
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]]
; CHECK-NEXT: ret i32 [[OR]]
;
%and = and i32 %a, %b
@@ -743,7 +743,7 @@ define i32 @test40d(i32 %a, i32 %b) {
define i32 @test45(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test45(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: ret i32 [[OR1]]
;
@@ -757,7 +757,7 @@ define i32 @test45_uses1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test45_uses1(
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: call void @use(i32 [[OR]])
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Z]]
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i32 [[OR1]]
;
@@ -771,7 +771,7 @@ define i32 @test45_uses1(i32 %x, i32 %y, i32 %z) {
define i32 @test45_uses2(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test45_uses2(
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[OR]]
; CHECK-NEXT: call void @use(i32 [[AND]])
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[AND]], [[Y]]
; CHECK-NEXT: ret i32 [[OR1]]
@@ -1605,7 +1605,7 @@ define i32 @mul_no_common_bits_commute2(i32 %p1, i32 %p2) {
define i32 @mul_no_common_bits_disjoint(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_no_common_bits_disjoint(
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[Y:%.*]], 1
-; CHECK-NEXT: [[R:%.*]] = mul i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = mul i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[R]]
;
%m = mul i32 %x, %y
@@ -1976,7 +1976,7 @@ define i32 @or_xor_and_uses1(i32 %x, i32 %y, i32 %z) {
define i32 @or_xor_and_uses2(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @or_xor_and_uses2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[AND]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], [[AND]]
; CHECK-NEXT: call void @use(i32 [[XOR]])
; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[OR1]]
@@ -2019,7 +2019,7 @@ define i32 @or_xor_and_commuted2(i32 %x, i32 %y, i32 %z) {
define i32 @or_xor_and_commuted3(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @or_xor_and_commuted3(
; CHECK-NEXT: [[YY:%.*]] = mul i32 [[Y:%.*]], [[Y]]
-; CHECK-NEXT: [[OR1:%.*]] = or i32 [[YY]], [[X:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[YY]]
; CHECK-NEXT: ret i32 [[OR1]]
;
%yy = mul i32 %y, %y ; thwart complexity-based ordering
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
index 5ed7d641df65b..469375633b60e 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -210,7 +210,7 @@ define i32 @n5_extrause0(i64 %x, i32 %nbits) {
; CHECK-NEXT: call void @use64(i64 [[T2]])
; CHECK-NEXT: call void @use64(i64 [[T3]])
; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]]
; CHECK-NEXT: call void @use64(i64 [[T5]])
; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32
; CHECK-NEXT: [[T7:%.*]] = shl i32 [[T6]], [[T4]]
@@ -246,7 +246,7 @@ define i32 @n6_extrause1(i64 %x, i32 %nbits) {
; CHECK-NEXT: call void @use64(i64 [[T2]])
; CHECK-NEXT: call void @use64(i64 [[T3]])
; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]]
; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32
; CHECK-NEXT: call void @use32(i32 [[T6]])
; CHECK-NEXT: [[T7:%.*]] = shl i32 [[T6]], [[T4]]
@@ -282,7 +282,7 @@ define i32 @n7_extrause2(i64 %x, i32 %nbits) {
; CHECK-NEXT: call void @use64(i64 [[T2]])
; CHECK-NEXT: call void @use64(i64 [[T3]])
; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]]
; CHECK-NEXT: call void @use64(i64 [[T5]])
; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32
; CHECK-NEXT: call void @use32(i32 [[T6]])
diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
index 1debf111b18cd..bce2a1c3f7e50 100644
--- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll
@@ -132,7 +132,7 @@ define i32 @n3_extrause(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T0:%.*]] = add i32 [[NBITS:%.*]], -1
; CHECK-NEXT: [[T1:%.*]] = shl nsw i32 -1, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor i32 [[T1]], -1
-; CHECK-NEXT: [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = and i32 [[X:%.*]], [[T2]]
; CHECK-NEXT: [[T4:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 7eb508ebb5537..4ad3afa2abddb 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -1395,7 +1395,7 @@ define i1 @phi_knownnonzero_eq_oricmp_commuted(i32 %n, i32 %s, ptr %P, i32 %val)
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]]
+; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[ORPHI]], 0
; CHECK-NEXT: ret i1 [[CMP1]]
;
@@ -1485,7 +1485,7 @@ define i1 @phi_knownnonzero_ne_oricmp_commuted(i32 %n, i32 %s, ptr %P, i32 %val)
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]]
+; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[ORPHI]], 0
; CHECK-NEXT: ret i1 [[CMP1]]
;
@@ -1559,7 +1559,7 @@ define i1 @phi_knownnonzero_ne_multiuse_oricmp_commuted(i32 %n, i32 %s, ptr %P,
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]]
+; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[ORPHI]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[NEXT:%.*]], label [[CLEANUP:%.*]]
; CHECK: next:
@@ -1601,7 +1601,7 @@ define i1 @phi_knownnonzero_eq_multiuse_andicmp(i32 %n, i32 %s, ptr %P, i32 %val
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LOAD]], [[N]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N]], [[LOAD]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1, i32 2
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
@@ -1648,7 +1648,7 @@ define i1 @phi_knownnonzero_ne_multiuse_andicmp(i32 %n, i32 %s, ptr %P, i32 %val
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LOAD]], [[N]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N]], [[LOAD]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1, i32 2
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
diff --git a/llvm/test/Transforms/InstCombine/pr14365.ll b/llvm/test/Transforms/InstCombine/pr14365.ll
index 3a09b55aba309..5e8dca13fa1b4 100644
--- a/llvm/test/Transforms/InstCombine/pr14365.ll
+++ b/llvm/test/Transforms/InstCombine/pr14365.ll
@@ -31,7 +31,7 @@ define i32 @test1(i32 %a0) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[A0:%.*]], 1
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 1431655765
-; CHECK-NEXT: [[TMP3:%.*]] = sub nsw i32 [[A0]], [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[A0]], [[TMP2]]
; CHECK-NEXT: ret i32 [[TMP3]]
;
%1 = ashr i32 %a0, 1
@@ -46,7 +46,7 @@ define <4 x i32> @test1_vec(<4 x i32> %a0) {
; CHECK-LABEL: @test1_vec(
; CHECK-NEXT: [[TMP1:%.*]] = lshr <4 x i32> [[A0:%.*]], <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], <i32 1431655765, i32 1431655765, i32 1431655765, i32 1431655765>
-; CHECK-NEXT: [[TMP3:%.*]] = sub nsw <4 x i32> [[A0]], [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub <4 x i32> [[A0]], [[TMP2]]
; CHECK-NEXT: ret <4 x i32> [[TMP3]]
;
%1 = ashr <4 x i32> %a0, <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/Transforms/InstCombine/pr44242.ll b/llvm/test/Transforms/InstCombine/pr44242.ll
index e86c17057fe27..bce22734127da 100644
--- a/llvm/test/Transforms/InstCombine/pr44242.ll
+++ b/llvm/test/Transforms/InstCombine/pr44242.ll
@@ -12,7 +12,7 @@ define float @sitofp(float %x) {
; CHECK: loop_header:
; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ]
; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]]
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]]
; CHECK: loop:
; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00
@@ -46,7 +46,7 @@ define <2 x i16> @bitcast(float %x) {
; CHECK: loop_header:
; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ]
; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]]
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]]
; CHECK: loop:
; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00
@@ -82,7 +82,7 @@ define void @store_volatile(float %x) {
; CHECK: loop_header:
; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ]
; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]]
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]]
; CHECK: loop:
; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00
@@ -149,7 +149,7 @@ define i32 @multiple_phis(float %x) {
; CHECK: loop_header:
; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL2:%.*]], [[LOOP_END:%.*]] ]
; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]]
; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[VAL_CASTED]], 2.000000e+00
diff --git a/llvm/test/Transforms/InstCombine/pr49688.ll b/llvm/test/Transforms/InstCombine/pr49688.ll
index 284b098b02afa..902aea262f537 100644
--- a/llvm/test/Transforms/InstCombine/pr49688.ll
+++ b/llvm/test/Transforms/InstCombine/pr49688.ll
@@ -7,7 +7,7 @@ define i1 @f(i32 %i1) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I1:%.*]], 0
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 7, [[I1]]
-; CHECK-NEXT: [[CMP4:%.*]] = icmp slt i32 [[SHR]], [[I1]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[I1]], [[SHR]]
; CHECK-NEXT: [[I2:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP4]]
; CHECK-NEXT: ret i1 [[I2]]
;
@@ -24,7 +24,7 @@ define i32 @f2(i32 signext %g, i32 zeroext %h) {
; CHECK-LABEL: @f2(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[G:%.*]], 0
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 7, [[H:%.*]]
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[SHR]], [[G]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[G]], [[SHR]]
; CHECK-NEXT: [[DOT0:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP1]]
; CHECK-NEXT: [[LOR_EXT:%.*]] = zext i1 [[DOT0]] to i32
; CHECK-NEXT: ret i32 [[LOR_EXT]]
diff --git a/llvm/test/Transforms/InstCombine/pr75369.ll b/llvm/test/Transforms/InstCombine/pr75369.ll
index 2f90753504b36..3855880047d6b 100644
--- a/llvm/test/Transforms/InstCombine/pr75369.ll
+++ b/llvm/test/Transforms/InstCombine/pr75369.ll
@@ -5,7 +5,7 @@ define i32 @main(ptr %a, i8 %a0, i32 %conv, i8 %a1) {
; CHECK-LABEL: define i32 @main(
; CHECK-SAME: ptr [[A:%.*]], i8 [[A0:%.*]], i32 [[CONV:%.*]], i8 [[A1:%.*]]) {
; CHECK-NEXT: [[A3:%.*]] = trunc i32 [[CONV]] to i8
-; CHECK-NEXT: [[OR11:%.*]] = or i8 [[A3]], [[A0]]
+; CHECK-NEXT: [[OR11:%.*]] = or i8 [[A0]], [[A3]]
; CHECK-NEXT: store i8 [[OR11]], ptr [[A]], align 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[A1]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
diff --git a/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll b/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll
index 5249aa4269e87..eec78063805a1 100644
--- a/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
define i1 @func(ptr %X, ptr %Y) {
; CHECK-LABEL: @func(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%i = ptrtoint ptr %X to i64
@@ -19,7 +19,7 @@ define i1 @func(ptr %X, ptr %Y) {
define <2 x i1> @func_vec(<2 x ptr> %X, <2 x ptr> %Y) {
; CHECK-LABEL: @func_vec(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x ptr> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x ptr> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%i = ptrtoint <2 x ptr> %X to <2 x i64>
@@ -30,7 +30,7 @@ define <2 x i1> @func_vec(<2 x ptr> %X, <2 x ptr> %Y) {
define <vscale x 2 x i1> @func_svec(<vscale x 2 x ptr> %X, <vscale x 2 x ptr> %Y) {
; CHECK-LABEL: @func_svec(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x ptr> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x ptr> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret <vscale x 2 x i1> [[CMP]]
;
%i = ptrtoint <vscale x 2 x ptr> %X to <vscale x 2 x i64>
@@ -41,7 +41,7 @@ define <vscale x 2 x i1> @func_svec(<vscale x 2 x ptr> %X, <vscale x 2 x ptr> %Y
define i1 @func_pointer_different_types(ptr %X, ptr %Y) {
; CHECK-LABEL: @func_pointer_different_types(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%i = ptrtoint ptr %X to i64
@@ -72,7 +72,7 @@ define i1 @func_integer_type_too_small(ptr %X, ptr %Y) {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 4294967295
; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[TMP2]] to ptr
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[P]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%i = ptrtoint ptr %X to i32
@@ -87,7 +87,7 @@ define i1 @func_ptr_different_addrspace(ptr %X, ptr addrspace(3) %Y){
; CHECK-LABEL: @func_ptr_different_addrspace(
; CHECK-NEXT: [[I:%.*]] = ptrtoint ptr [[X:%.*]] to i64
; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr addrspace(3)
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(3) [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(3) [[Y:%.*]], [[P]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%i = ptrtoint ptr %X to i64
@@ -103,7 +103,7 @@ define i1 @func_ptr_different_addrspace1(ptr addrspace(2) %X, ptr %Y){
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[X:%.*]] to i32
; CHECK-NEXT: [[I:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[P]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%i = ptrtoint ptr addrspace(2) %X to i64
diff --git a/llvm/test/Transforms/InstCombine/ptrmask.ll b/llvm/test/Transforms/InstCombine/ptrmask.ll
index 4631b81cd1ce1..24777b1b7f208 100644
--- a/llvm/test/Transforms/InstCombine/ptrmask.ll
+++ b/llvm/test/Transforms/InstCombine/ptrmask.ll
@@ -155,7 +155,7 @@ define i64 @ptrtoint_of_ptrmask(ptr %p, i64 %m) {
; CHECK-LABEL: define i64 @ptrtoint_of_ptrmask
; CHECK-SAME: (ptr [[P:%.*]], i64 [[M:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[R:%.*]] = and i64 [[TMP1]], [[M]]
+; CHECK-NEXT: [[R:%.*]] = and i64 [[M]], [[TMP1]]
; CHECK-NEXT: ret i64 [[R]]
;
%pm = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 %m)
@@ -168,7 +168,7 @@ define i32 @ptrtoint_of_ptrmask2(ptr %p, i64 %m) {
; CHECK-LABEL: define i32 @ptrtoint_of_ptrmask2
; CHECK-SAME: (ptr [[P:%.*]], i64 [[M:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[M]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[M]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = trunc i64 [[TMP2]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
@@ -181,7 +181,7 @@ define <2 x i64> @ptrtoint_of_ptrmask_vec(<2 x ptr> %p, <2 x i64> %m) {
; CHECK-LABEL: define <2 x i64> @ptrtoint_of_ptrmask_vec
; CHECK-SAME: (<2 x ptr> [[P:%.*]], <2 x i64> [[M:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <2 x ptr> [[P]] to <2 x i64>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i64> [[TMP1]], [[M]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i64> [[M]], [[TMP1]]
; CHECK-NEXT: ret <2 x i64> [[R]]
;
%pm = call <2 x ptr> @llvm.ptrmask.v2p0.v2i64(<2 x ptr> %p, <2 x i64> %m)
@@ -193,7 +193,7 @@ define <2 x i32> @ptrtoint_of_ptrmask_vec2(<2 x ptr> %p, <2 x i64> %m) {
; CHECK-LABEL: define <2 x i32> @ptrtoint_of_ptrmask_vec2
; CHECK-SAME: (<2 x ptr> [[P:%.*]], <2 x i64> [[M:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <2 x ptr> [[P]] to <2 x i64>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i64> [[TMP1]], [[M]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i64> [[M]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
@@ -374,10 +374,10 @@ define ptr @ptrmask_to_modified_gep6(ptr align 16 %p) {
define ptr @ptrmask_to_modified_gep_indirect0(ptr align 16 %p) {
; CHECK-LABEL: define ptr @ptrmask_to_modified_gep_indirect0
; CHECK-SAME: (ptr align 16 [[P:%.*]]) {
-; 44 from 4*sizeof(i32) + (31 & -4)
-; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[P]], i64 44
-; CHECK-NEXT: ret ptr [[GEP1]]
+; CHECK-NEXT: [[GEP11:%.*]] = getelementptr i8, ptr [[P]], i64 44
+; CHECK-NEXT: ret ptr [[GEP11]]
;
+; 44 from 4*sizeof(i32) + (31 & -4)
%gep0 = getelementptr i32, ptr %p, i32 4
%gep1 = getelementptr i8, ptr %gep0, i32 31
%pm = call ptr @llvm.ptrmask.p0.i64(ptr %gep1, i64 -4)
@@ -387,11 +387,11 @@ define ptr @ptrmask_to_modified_gep_indirect0(ptr align 16 %p) {
define ptr @ptrmask_to_modified_gep_indirect1(ptr %p) {
; CHECK-LABEL: define ptr @ptrmask_to_modified_gep_indirect1
; CHECK-SAME: (ptr [[P:%.*]]) {
-
-; CHECK-NEXT: [[R:%.*]] = call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[P]], i64 -16)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[R]], i64 32
-; CHECK-NEXT: ret ptr [[GEP]]
+; CHECK-NEXT: [[PM0:%.*]] = call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[P]], i64 -16)
+; CHECK-NEXT: [[PGEP1:%.*]] = getelementptr i8, ptr [[PM0]], i64 32
+; CHECK-NEXT: ret ptr [[PGEP1]]
;
+
%pm0 = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)
%pgep = getelementptr i8, ptr %pm0, i64 33
%r = call ptr @llvm.ptrmask.p0.i64(ptr %pgep, i64 -16)
diff --git a/llvm/test/Transforms/InstCombine/range-check.ll b/llvm/test/Transforms/InstCombine/range-check.ll
index 0d138b6ba7e79..5d67cc8232c5a 100644
--- a/llvm/test/Transforms/InstCombine/range-check.ll
+++ b/llvm/test/Transforms/InstCombine/range-check.ll
@@ -7,7 +7,7 @@
define i1 @test_and1(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and1(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -21,7 +21,7 @@ define i1 @test_and1_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and1_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[X:%.*]], -1
-; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[NN]], [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp slt i32 [[X]], [[NN]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false
; CHECK-NEXT: ret i1 [[C]]
;
@@ -35,7 +35,7 @@ define i1 @test_and1_logical(i32 %x, i32 %n) {
define i1 @test_and2(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and2(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -49,7 +49,7 @@ define i1 @test_and2_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and2_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[X:%.*]], -1
-; CHECK-NEXT: [[B:%.*]] = icmp sge i32 [[NN]], [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp sle i32 [[X]], [[NN]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false
; CHECK-NEXT: ret i1 [[C]]
;
@@ -63,7 +63,7 @@ define i1 @test_and2_logical(i32 %x, i32 %n) {
define i1 @test_and3(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and3(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -76,7 +76,7 @@ define i1 @test_and3(i32 %x, i32 %n) {
define i1 @test_and3_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and3_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -89,7 +89,7 @@ define i1 @test_and3_logical(i32 %x, i32 %n) {
define i1 @test_and4(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and4(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -102,7 +102,7 @@ define i1 @test_and4(i32 %x, i32 %n) {
define i1 @test_and4_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_and4_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -115,7 +115,7 @@ define i1 @test_and4_logical(i32 %x, i32 %n) {
define i1 @test_or1(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or1(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -129,7 +129,7 @@ define i1 @test_or1_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or1_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], 0
-; CHECK-NEXT: [[B:%.*]] = icmp sle i32 [[NN]], [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp sge i32 [[X]], [[NN]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 true, i1 [[B]]
; CHECK-NEXT: ret i1 [[C]]
;
@@ -143,7 +143,7 @@ define i1 @test_or1_logical(i32 %x, i32 %n) {
define i1 @test_or2(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or2(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -157,7 +157,7 @@ define i1 @test_or2_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or2_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], 0
-; CHECK-NEXT: [[B:%.*]] = icmp slt i32 [[NN]], [[X]]
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], [[NN]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 true, i1 [[B]]
; CHECK-NEXT: ret i1 [[C]]
;
@@ -171,7 +171,7 @@ define i1 @test_or2_logical(i32 %x, i32 %n) {
define i1 @test_or3(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or3(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -184,7 +184,7 @@ define i1 @test_or3(i32 %x, i32 %n) {
define i1 @test_or3_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or3_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -197,7 +197,7 @@ define i1 @test_or3_logical(i32 %x, i32 %n) {
define i1 @test_or4(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or4(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -210,7 +210,7 @@ define i1 @test_or4(i32 %x, i32 %n) {
define i1 @test_or4_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @test_or4_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: ret i1 [[C]]
;
%nn = and i32 %n, 2147483647
@@ -225,7 +225,7 @@ define i1 @test_or4_logical(i32 %x, i32 %n) {
define i1 @negative1(i32 %x, i32 %n) {
; CHECK-LABEL: @negative1(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], 0
; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -240,7 +240,7 @@ define i1 @negative1(i32 %x, i32 %n) {
define i1 @negative1_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @negative1_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], 0
; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -281,7 +281,7 @@ define i1 @negative2_logical(i32 %x, i32 %n) {
define i1 @negative3(i32 %x, i32 %y, i32 %n) {
; CHECK-LABEL: @negative3(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[Y:%.*]], -1
; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -296,7 +296,7 @@ define i1 @negative3(i32 %x, i32 %y, i32 %n) {
define i1 @negative3_logical(i32 %x, i32 %y, i32 %n) {
; CHECK-LABEL: @negative3_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[Y:%.*]], -1
; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false
; CHECK-NEXT: ret i1 [[C]]
@@ -311,7 +311,7 @@ define i1 @negative3_logical(i32 %x, i32 %y, i32 %n) {
define i1 @negative4(i32 %x, i32 %n) {
; CHECK-LABEL: @negative4(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -326,7 +326,7 @@ define i1 @negative4(i32 %x, i32 %n) {
define i1 @negative4_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @negative4_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -341,7 +341,7 @@ define i1 @negative4_logical(i32 %x, i32 %n) {
define i1 @negative5(i32 %x, i32 %n) {
; CHECK-LABEL: @negative5(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
@@ -356,7 +356,7 @@ define i1 @negative5(i32 %x, i32 %n) {
define i1 @negative5_logical(i32 %x, i32 %n) {
; CHECK-LABEL: @negative5_logical(
; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647
-; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]]
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
; CHECK-NEXT: ret i1 [[C]]
diff --git a/llvm/test/Transforms/InstCombine/reassociate-nuw.ll b/llvm/test/Transforms/InstCombine/reassociate-nuw.ll
index 9718739ed8ab2..99f07c0a8e0ad 100644
--- a/llvm/test/Transforms/InstCombine/reassociate-nuw.ll
+++ b/llvm/test/Transforms/InstCombine/reassociate-nuw.ll
@@ -132,7 +132,7 @@ define i32 @tryFactorization_add_nuw_mul(i32 %x) {
define i32 @tryFactorization_add_nuw_mul_nuw_mul_nuw_var(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @tryFactorization_add_nuw_mul_nuw_mul_nuw_var(
; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[ADD1:%.*]] = mul nuw i32 [[MUL21]], [[X:%.*]]
+; CHECK-NEXT: [[ADD1:%.*]] = mul nuw i32 [[X:%.*]], [[MUL21]]
; CHECK-NEXT: ret i32 [[ADD1]]
;
%mul1 = mul nuw i32 %x, %y
@@ -144,7 +144,7 @@ define i32 @tryFactorization_add_nuw_mul_nuw_mul_nuw_var(i32 %x, i32 %y, i32 %z)
define i32 @tryFactorization_add_nuw_mul_mul_nuw_var(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @tryFactorization_add_nuw_mul_mul_nuw_var(
; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]]
+; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]]
; CHECK-NEXT: ret i32 [[ADD1]]
;
%mul1 = mul i32 %x, %y
@@ -156,7 +156,7 @@ define i32 @tryFactorization_add_nuw_mul_mul_nuw_var(i32 %x, i32 %y, i32 %z) {
define i32 @tryFactorization_add_nuw_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @tryFactorization_add_nuw_mul_nuw_mul_var(
; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]]
+; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]]
; CHECK-NEXT: ret i32 [[ADD1]]
;
%mul1 = mul nuw i32 %x, %y
@@ -168,7 +168,7 @@ define i32 @tryFactorization_add_nuw_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) {
define i32 @tryFactorization_add_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @tryFactorization_add_mul_nuw_mul_var(
; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]]
+; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]]
; CHECK-NEXT: ret i32 [[ADD1]]
;
%mul1 = mul nuw i32 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
index cb6775e689b8c..8c61e24a97f1d 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll
@@ -20,7 +20,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor i64 [[T1]], -1
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
-; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T4:%.*]] = and i64 [[X:%.*]], [[T2]]
; CHECK-NEXT: call void @use32(i32 [[NBITS]])
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
@@ -60,7 +60,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
-; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[X:%.*]], [[T2]]
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[NBITS]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -95,7 +95,7 @@ define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>
; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 poison, i32 32>, [[NBITS]]
-; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[X:%.*]], [[T2]]
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[NBITS]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
@@ -131,7 +131,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-NEXT: [[T2:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T1]]
; CHECK-NEXT: [[T3:%.*]] = xor <8 x i64> [[T2]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> <i32 33, i32 32, i32 33, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]]
-; CHECK-NEXT: [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = and <8 x i64> [[X:%.*]], [[T3]]
; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T2]])
@@ -206,7 +206,7 @@ define i32 @n5_extrause(i64 %x, i32 %nbits) {
; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor i64 [[T1]], -1
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
-; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T4:%.*]] = and i64 [[X:%.*]], [[T2]]
; CHECK-NEXT: call void @use32(i32 [[NBITS]])
; CHECK-NEXT: call void @use64(i64 [[T0]])
; CHECK-NEXT: call void @use64(i64 [[T1]])
diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
index 4b955a894fcfe..e3c0981389116 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll
@@ -19,7 +19,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -44,7 +44,7 @@ define i32 @t1_bigger_shift(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t1_bigger_shift(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 33, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -70,7 +70,7 @@ define i32 @t2_bigger_mask(i32 %x, i32 %nbits) {
; CHECK-NEXT: [[T0:%.*]] = add i32 [[NBITS:%.*]], 1
; CHECK-NEXT: [[T1:%.*]] = shl nsw i32 -1, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor i32 [[T1]], -1
-; CHECK-NEXT: [[T3:%.*]] = and i32 [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = and i32 [[X:%.*]], [[T2]]
; CHECK-NEXT: [[T4:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -102,7 +102,7 @@ define <3 x i32> @t3_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_splat(
; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], <i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]]
; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> <i32 32, i32 32, i32 32>, [[NBITS]]
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[NBITS]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
@@ -131,7 +131,7 @@ define <3 x i32> @t4_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: [[T0:%.*]] = add <3 x i32> [[NBITS:%.*]], <i32 -1, i32 0, i32 1>
; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[T0]]
; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], <i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]]
; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> <i32 33, i32 32, i32 32>, [[NBITS]]
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
@@ -159,7 +159,7 @@ define <3 x i32> @t5_vec_poison(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t5_vec_poison(
; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> <i32 -1, i32 poison, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], <i32 -1, i32 poison, i32 -1>
-; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]]
; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> <i32 32, i32 poison, i32 32>, [[NBITS]]
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[NBITS]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
@@ -285,7 +285,7 @@ define i32 @t9_nuw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t9_nuw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -310,7 +310,7 @@ define i32 @t10_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t10_nsw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -335,7 +335,7 @@ define i32 @t11_nuw_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t11_nuw_nsw(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -362,7 +362,7 @@ define i32 @n12_not_minus_one(i32 %x, i32 %nbits) {
; CHECK-LABEL: @n12_not_minus_one(
; CHECK-NEXT: [[T0:%.*]] = shl i32 -2, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]]
; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll
index ae390e72a4b73..df441c9effa73 100644
--- a/llvm/test/Transforms/InstCombine/rem.ll
+++ b/llvm/test/Transforms/InstCombine/rem.ll
@@ -239,7 +239,7 @@ define <2 x i1> @test3a_vec(<2 x i32> %A) {
define i32 @test4(i32 %X, i1 %C) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C:%.*]], i32 0, i32 7
-; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[R]]
;
%V = select i1 %C, i32 1, i32 8
@@ -252,7 +252,7 @@ define i32 @test5(i32 %X, i8 %B) {
; CHECK-NEXT: [[SHIFT_UPGRD_1:%.*]] = zext nneg i8 [[B:%.*]] to i32
; CHECK-NEXT: [[AMT:%.*]] = shl nuw i32 32, [[SHIFT_UPGRD_1]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[AMT]], -1
-; CHECK-NEXT: [[V:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[V:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[V]]
;
%shift.upgrd.1 = zext i8 %B to i32
@@ -340,7 +340,7 @@ define i64 @test14(i64 %x, i32 %y) {
; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[ZEXT]], -1
-; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[UREM:%.*]] = and i64 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i64 [[UREM]]
;
%shl = shl i32 1, %y
@@ -353,7 +353,7 @@ define i64 @test15(i32 %x, i32 %y) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[UREM:%.*]] = zext nneg i32 [[TMP2]] to i64
; CHECK-NEXT: ret i64 [[UREM]]
;
@@ -369,7 +369,7 @@ define i32 @test16(i32 %x, i32 %y) {
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[Y:%.*]], 11
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 4
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i32 [[AND]], 3
-; CHECK-NEXT: [[REM:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[REM:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[REM]]
;
%shr = lshr i32 %y, 11
@@ -394,7 +394,7 @@ define i32 @test18(i16 %x, i32 %y) {
; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[X:%.*]], 4
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i16 [[TMP1]], 0
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[DOTNOT]], i32 63, i32 31
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[TMP3]]
;
%1 = and i16 %x, 4
@@ -411,7 +411,7 @@ define i32 @test19(i32 %x, i32 %y) {
; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], [[A]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1
-; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i32 [[E]]
;
%A = shl i32 1, %x
@@ -429,7 +429,7 @@ define i32 @test19_commutative0(i32 %x, i32 %y) {
; CHECK-NEXT: [[C:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], [[A]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1
-; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i32 [[E]]
;
%A = shl i32 1, %x
@@ -447,7 +447,7 @@ define i32 @test19_commutative1(i32 %x, i32 %y) {
; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[D:%.*]] = add i32 [[A]], [[C]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1
-; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i32 [[E]]
;
%A = shl i32 1, %x
@@ -465,7 +465,7 @@ define i32 @test19_commutative2(i32 %x, i32 %y) {
; CHECK-NEXT: [[C:%.*]] = and i32 [[B]], [[A]]
; CHECK-NEXT: [[D:%.*]] = add i32 [[A]], [[C]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1
-; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i32 [[E]]
;
%A = shl i32 1, %x
@@ -722,7 +722,7 @@ define i1 @test26(i32 %A, i32 %B) {
; CHECK-LABEL: @test26(
; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[B:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[E:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[E]]
;
diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
index 107ef291bf439..8103d366d444d 100644
--- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
@@ -49,7 +49,7 @@ define i1 @t1(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -70,7 +70,7 @@ define i1 @t1_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -92,7 +92,7 @@ define i1 @t2(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %offset, 0
@@ -113,7 +113,7 @@ define i1 @t2_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %offset, 0
@@ -137,7 +137,7 @@ define i1 @t3_oneuse0(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -161,7 +161,7 @@ define i1 @t3_oneuse0_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -184,7 +184,7 @@ define i1 @t4_oneuse1(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -208,7 +208,7 @@ define i1 @t4_oneuse1_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -281,7 +281,7 @@ define i1 @t6_commutativity0(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -302,7 +302,7 @@ define i1 @t6_commutativity0_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -322,7 +322,7 @@ define i1 @t7_commutativity1(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -343,7 +343,7 @@ define i1 @t7_commutativity1_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -363,7 +363,7 @@ define i1 @t7_commutativity3(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -384,7 +384,7 @@ define i1 @t7_commutativity3_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -406,7 +406,7 @@ define i1 @t8(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -427,7 +427,7 @@ define i1 @t8_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -449,7 +449,7 @@ define i1 @t9(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
@@ -470,7 +470,7 @@ define i1 @t9_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp = icmp slt i8 %base, 0
diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
index 0be4457ad3fc0..f967fcac367bb 100644
--- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll
@@ -11,7 +11,7 @@ define i1 @t0(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -27,7 +27,7 @@ define i1 @t0_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -46,7 +46,7 @@ define i1 @t1_oneuse0(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -65,7 +65,7 @@ define i1 @t1_oneuse0_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -83,7 +83,7 @@ define i1 @t2_oneuse1(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -102,7 +102,7 @@ define i1 @t2_oneuse1_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -160,7 +160,7 @@ define i1 @t4_commutativity0(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -176,7 +176,7 @@ define i1 @t4_commutativity0_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -191,7 +191,7 @@ define i1 @t5_commutativity1(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -207,7 +207,7 @@ define i1 @t5_commutativity1_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -222,7 +222,7 @@ define i1 @t6_commutativity3(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -238,7 +238,7 @@ define i1 @t6_commutativity3_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -255,7 +255,7 @@ define i1 @t7(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -271,7 +271,7 @@ define i1 @t7_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -288,7 +288,7 @@ define i1 @t8(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
@@ -304,7 +304,7 @@ define i1 @t8_logical(i8 %base, i8 %offset) {
; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]]
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = add i8 %base, %offset
diff --git a/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll b/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll
index a8be8180b9118..30a5072c7edc8 100644
--- a/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll
@@ -509,11 +509,11 @@ define i1 @t9_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t9_commutative(
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ugt i8 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT: [[NULL:%.*]] = icmp eq i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = sub i8 %base, %offset
@@ -530,11 +530,11 @@ define i1 @t9_commutative_logical(i8 %base, i8 %offset) {
; CHECK-LABEL: @t9_commutative_logical(
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ugt i8 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[UNDERFLOW]])
; CHECK-NEXT: [[NULL:%.*]] = icmp eq i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%adjusted = sub i8 %base, %offset
@@ -554,11 +554,11 @@ define i1 @t10(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -578,11 +578,11 @@ define i1 @t10_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -601,11 +601,11 @@ define i1 @t11_commutative(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -625,11 +625,11 @@ define i1 @t11_commutative_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -649,11 +649,11 @@ define i1 @t12(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -673,11 +673,11 @@ define i1 @t12_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -696,11 +696,11 @@ define i1 @t13(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
@@ -720,11 +720,11 @@ define i1 @t13_logical(i64 %base, ptr nonnull %offsetptr) {
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[R]]
;
%offset = ptrtoint ptr %offsetptr to i64
diff --git a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
index 57977a72cd08f..d5b7a1a08c9b2 100644
--- a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
+++ b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll
@@ -1559,7 +1559,7 @@ define i32 @not_uadd_sat2(i32 %x, i32 %y) {
define i32 @uadd_sat_not(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1572,7 +1572,7 @@ define i32 @uadd_sat_not(i32 %x, i32 %y) {
define i32 @uadd_sat_not_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_nonstrict(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1602,7 +1602,7 @@ define i32 @uadd_sat_not_commute_add(i32 %xp, i32 %yp) {
define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_ugt(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1615,7 +1615,7 @@ define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) {
define i32 @uadd_sat_not_uge(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_uge(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1643,7 +1643,7 @@ define <2 x i32> @uadd_sat_not_ugt_commute_add(<2 x i32> %x, <2 x i32> %yp) {
define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1656,7 +1656,7 @@ define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) {
define i32 @uadd_sat_not_commute_select_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_nonstrict(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1701,7 +1701,7 @@ define <2 x i32> @uadd_sat_not_commute_select_ugt(<2 x i32> %xp, <2 x i32> %yp)
define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt_commute_add(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1714,7 +1714,7 @@ define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) {
define i32 @uadd_sat_not_commute_select_uge_commute_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_uge_commute_add(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: ret i32 [[R]]
;
%notx = xor i32 %x, -1
@@ -1894,7 +1894,7 @@ define i32 @unsigned_sat_variable_using_wrong_min(i32 %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_min(
; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1
-; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[NOTY]], i32 [[X:%.*]])
+; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[NOTY]])
; CHECK-NEXT: [[R:%.*]] = add i32 [[Y]], [[S]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -1912,8 +1912,8 @@ define i32 @unsigned_sat_variable_using_wrong_value(i32 %x, i32 %z) {
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_value(
; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1
-; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[NOTY]], i32 [[X:%.*]])
-; CHECK-NEXT: [[R:%.*]] = add i32 [[S]], [[Z:%.*]]
+; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[NOTY]])
+; CHECK-NEXT: [[R:%.*]] = add i32 [[Z:%.*]], [[S]]
; CHECK-NEXT: ret i32 [[R]]
;
%y = call i32 @get_i32() ; thwart complexity-based canonicalization
@@ -2024,7 +2024,7 @@ define i32 @uadd_sat_via_add_swapped_cmp(i32 %x, i32 %y) {
define i32 @uadd_sat_via_add_swapped_cmp_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_nonstrict(
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[C_NOT:%.*]] = icmp ugt i32 [[A]], [[Y]]
+; CHECK-NEXT: [[C_NOT:%.*]] = icmp ult i32 [[Y]], [[A]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 -1
; CHECK-NEXT: ret i32 [[R]]
;
@@ -2048,7 +2048,7 @@ define i32 @uadd_sat_via_add_swapped_cmp_nonstric(i32 %x, i32 %y) {
define i32 @uadd_sat_via_add_swapped_cmp_select_nonstrict(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_select_nonstrict(
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[A]], [[Y]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[Y]], [[A]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1
; CHECK-NEXT: ret i32 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll b/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll
index 424470aa929e1..29c0ac415ce7c 100644
--- a/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll
@@ -184,8 +184,8 @@ define float @extract_element_load(<4 x float> %x, ptr %ptr) {
;
; CHECK-LABEL: @extract_element_load(
; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
@@ -200,7 +200,7 @@ define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr
; CHECK-LABEL: @extract_element_multi_Use_load(
; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16
; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16
-; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[X:%.*]], [[LOAD]]
; CHECK-NEXT: [[R:%.*]] = extractelement <4 x float> [[ADD]], i64 2
; CHECK-NEXT: ret float [[R]]
;
@@ -227,7 +227,7 @@ define float @extelt_binop_insertelt(<4 x float> %A, <4 x float> %B, float %f) {
;
; CHECK-LABEL: @extelt_binop_insertelt(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
-; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[TMP1]], [[F:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[F:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[E]]
;
%C = insertelement <4 x float> %A, float %f, i32 0
@@ -243,7 +243,7 @@ define i32 @extelt_binop_binop_insertelt(<4 x i32> %A, <4 x i32> %B, i32 %f) {
;
; CHECK-LABEL: @extelt_binop_binop_insertelt(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[B:%.*]], i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[F:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[F:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[B]], i64 0
; CHECK-NEXT: [[E:%.*]] = mul nsw i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i32 [[E]]
@@ -348,7 +348,7 @@ define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> %
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]]
; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ARG0:%.*]], [[ADD]]
; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
; CHECK-NEXT: ret i1 [[EXT]]
;
diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll
index 7e645ef7e883e..ba69b9293c9d4 100644
--- a/llvm/test/Transforms/InstCombine/scalarization.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization.ll
@@ -184,8 +184,8 @@ define float @extract_element_load(<4 x float> %x, ptr %ptr) {
;
; CHECK-LABEL: @extract_element_load(
; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16
-; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[LOAD]], i64 2
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret float [[R]]
;
@@ -200,7 +200,7 @@ define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr
; CHECK-LABEL: @extract_element_multi_Use_load(
; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16
; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16
-; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[X:%.*]], [[LOAD]]
; CHECK-NEXT: [[R:%.*]] = extractelement <4 x float> [[ADD]], i64 2
; CHECK-NEXT: ret float [[R]]
;
@@ -227,7 +227,7 @@ define float @extelt_binop_insertelt(<4 x float> %A, <4 x float> %B, float %f) {
;
; CHECK-LABEL: @extelt_binop_insertelt(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
-; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[TMP1]], [[F:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[F:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[E]]
;
%C = insertelement <4 x float> %A, float %f, i32 0
@@ -241,7 +241,7 @@ define i32 @extelt_binop_binop_insertelt(<4 x i32> %A, <4 x i32> %B, i32 %f) {
;
; CHECK-LABEL: @extelt_binop_binop_insertelt(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[B:%.*]], i64 0
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[F:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[F:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[B]], i64 0
; CHECK-NEXT: [[E:%.*]] = mul nsw i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i32 [[E]]
@@ -344,8 +344,8 @@ define i1 @extractelt_vector_fcmp_constrhs_dynidx(<2 x float> %arg, i32 %idx) {
define i1 @extractelt_vector_fcmp_copy_flags(<4 x float> %x) {
; CHECK-LABEL: @extractelt_vector_fcmp_copy_flags(
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
-; CHECK-NEXT: [[TMP2:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
-; CHECK-NEXT: ret i1 [[TMP2]]
+; CHECK-NEXT: [[R:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
+; CHECK-NEXT: ret i1 [[R]]
;
%cmp = fcmp nsz arcp oeq <4 x float> %x, zeroinitializer
%r = extractelement <4 x i1> %cmp, i32 2
@@ -357,7 +357,7 @@ define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> %
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]]
; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ARG0:%.*]], [[ADD]]
; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0
; CHECK-NEXT: ret i1 [[EXT]]
;
diff --git a/llvm/test/Transforms/InstCombine/select-and-or.ll b/llvm/test/Transforms/InstCombine/select-and-or.ll
index 0f7acd4d56c06..7a0cbea78d9bc 100644
--- a/llvm/test/Transforms/InstCombine/select-and-or.ll
+++ b/llvm/test/Transforms/InstCombine/select-and-or.ll
@@ -502,7 +502,7 @@ define i1 @and_or2_commuted(i1 %a, i1 %b, i1 %c) {
define i1 @and_or1_multiuse(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @and_or1_multiuse(
; CHECK-NEXT: [[NOTA:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTA]], [[C:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[C:%.*]], [[NOTA]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
@@ -517,7 +517,7 @@ define i1 @and_or1_multiuse(i1 %a, i1 %b, i1 %c) {
define i1 @and_or2_multiuse(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @and_or2_multiuse(
; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTC]], [[B:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[NOTC]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]]
; CHECK-NEXT: ret i1 [[R]]
@@ -588,7 +588,7 @@ define <2 x i1> @and_or2_vec_commuted(<2 x i1> %a, <2 x i1> %b) {
define i1 @and_or1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
; CHECK-LABEL: @and_or1_wrong_operand(
; CHECK-NEXT: [[NOTA:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTA]], [[C:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[C:%.*]], [[NOTA]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -601,7 +601,7 @@ define i1 @and_or1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
define i1 @and_or2_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
; CHECK-LABEL: @and_or2_wrong_operand(
; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTC]], [[B:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[NOTC]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -651,7 +651,7 @@ define i1 @and_or3_not_free_to_invert(i1 %a, i1 %b, i1 %c) {
define i1 @and_or3_multiuse(i1 %a, i1 %b, i32 %x, i32 %y) {
; CHECK-LABEL: @and_or3_multiuse(
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[C]], [[B:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[C]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]]
; CHECK-NEXT: ret i1 [[R]]
@@ -692,7 +692,7 @@ define <2 x i1> @and_or3_vec_commuted(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2
define i1 @and_or3_wrong_operand(i1 %a, i1 %b, i32 %x, i32 %y, i1 %d) {
; CHECK-LABEL: @and_or3_wrong_operand(
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[C]], [[B:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[C]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -753,7 +753,7 @@ define i1 @or_and2_commuted(i1 %a, i1 %b, i1 %c) {
define i1 @or_and1_multiuse(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @or_and1_multiuse(
; CHECK-NEXT: [[NOTB:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTB]], [[C:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[C:%.*]], [[NOTB]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]]
; CHECK-NEXT: ret i1 [[R]]
@@ -768,7 +768,7 @@ define i1 @or_and1_multiuse(i1 %a, i1 %b, i1 %c) {
define i1 @or_and2_multiuse(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @or_and2_multiuse(
; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTC]], [[A:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[NOTC]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
@@ -839,7 +839,7 @@ define <2 x i1> @or_and2_vec_commuted(<2 x i1> %a, <2 x i1> %b) {
define i1 @or_and1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
; CHECK-LABEL: @or_and1_wrong_operand(
; CHECK-NEXT: [[NOTB:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTB]], [[C:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = and i1 [[C:%.*]], [[NOTB]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -852,7 +852,7 @@ define i1 @or_and1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
define i1 @or_and2_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) {
; CHECK-LABEL: @or_and2_wrong_operand(
; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTC]], [[A:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[NOTC]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -915,7 +915,7 @@ define i1 @or_and3_not_free_to_invert(i1 %a, i1 %b, i1 %c) {
define i1 @or_and3_multiuse(i1 %a, i1 %b, i32 %x, i32 %y) {
; CHECK-LABEL: @or_and3_multiuse(
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[C]], [[A:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[C]]
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
@@ -956,7 +956,7 @@ define <2 x i1> @or_and3_vec_commuted(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2
define i1 @or_and3_wrong_operand(i1 %a, i1 %b, i32 %x, i32 %y, i1 %d) {
; CHECK-LABEL: @or_and3_wrong_operand(
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[C]], [[A:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[C]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -1216,7 +1216,7 @@ define i8 @test_or_eq_different_operands(i8 %a, i8 %b, i8 %c) {
define i8 @test_or_eq_a_b_multi_use(i1 %other_cond, i8 %a, i8 %b) {
; CHECK-LABEL: @test_or_eq_a_b_multi_use(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[COND:%.*]] = or i1 [[CMP]], [[OTHER_COND:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = or i1 [[OTHER_COND:%.*]], [[CMP]]
; CHECK-NEXT: call void @use(i1 [[CMP]])
; CHECK-NEXT: call void @use(i1 [[COND]])
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[OTHER_COND]], i8 [[A]], i8 [[B]]
diff --git a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
index 1fa0c09a9e987..9aa048b4fbc27 100644
--- a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll
@@ -1210,7 +1210,7 @@ define i32 @select_replace_nested(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @select_replace_nested(
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT: [[ADD:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 0
-; CHECK-NEXT: [[S:%.*]] = add i32 [[ADD]], [[Y:%.*]]
+; CHECK-NEXT: [[S:%.*]] = add i32 [[Y:%.*]], [[ADD]]
; CHECK-NEXT: ret i32 [[S]]
;
%c = icmp eq i32 %x, 0
diff --git a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
index 77ff16a8b2e3d..e5ad312bb85c1 100644
--- a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
+++ b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
@@ -15,7 +15,7 @@ define float @select_maybe_nan_fadd(i1 %cond, float %A, float %B) {
define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) {
; CHECK-LABEL: @select_fpclass_fadd(
; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
-; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd float %A, %B
@@ -26,7 +26,7 @@ define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) {
define float @select_nnan_fadd(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
-; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd float %A, %B
@@ -37,7 +37,7 @@ define float @select_nnan_fadd(i1 %cond, float %A, float %B) {
define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd_swapped(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd float %A, %B
@@ -48,7 +48,7 @@ define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd_fast_math(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
-; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd fast float %A, %B
@@ -59,7 +59,7 @@ define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd_swapped_fast_math(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fadd fast float %A, %B
@@ -70,7 +70,7 @@ define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) {
define <4 x float> @select_nnan_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: @select_nnan_nsz_fadd_v4f32(
; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer
-; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <4 x float> [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <4 x float> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <4 x float> [[D]]
;
%C = fadd nsz nnan <4 x float> %A, %B
@@ -81,7 +81,7 @@ define <4 x float> @select_nnan_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <
define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32(
; CHECK-NEXT: [[C:%.*]] = select nnan nsz <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> zeroinitializer
-; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <vscale x 4 x float> [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <vscale x 4 x float> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <vscale x 4 x float> [[D]]
;
%C = fadd nnan nsz <vscale x 4 x float> %A, %B
@@ -92,7 +92,7 @@ define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32(<vscale x 4 x i1> %con
define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32_swapops(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) {
; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32_swapops(
; CHECK-NEXT: [[C:%.*]] = select fast <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fadd fast <vscale x 4 x float> [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd fast <vscale x 4 x float> [[A:%.*]], [[C]]
; CHECK-NEXT: ret <vscale x 4 x float> [[D]]
;
%C = fadd fast <vscale x 4 x float> %A, %B
@@ -103,7 +103,7 @@ define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32_swapops(<vscale x 4 x
define float @select_nnan_fmul(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[D:%.*]] = fmul float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul float %A, %B
@@ -114,7 +114,7 @@ define float @select_nnan_fmul(i1 %cond, float %A, float %B) {
define float @select_nnan_fmul_swapped(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul_swapped(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fmul float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul float %A, %B
@@ -125,7 +125,7 @@ define float @select_nnan_fmul_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul_fast_math(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[D:%.*]] = fmul fast float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul fast float %A, %B
@@ -136,7 +136,7 @@ define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul_swapped_fast_math(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fmul fast float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
%C = fmul fast float %A, %B
diff --git a/llvm/test/Transforms/InstCombine/select-cmp.ll b/llvm/test/Transforms/InstCombine/select-cmp.ll
index 711fac542179f..1a8b0736274f4 100644
--- a/llvm/test/Transforms/InstCombine/select-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/select-cmp.ll
@@ -14,7 +14,7 @@ define i1 @f(i1 %cond, i32 %x, i32 %x2) {
define i1 @icmp_ne_common_op00(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_ne_common_op00(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ne i6 %x, %y
@@ -26,7 +26,7 @@ define i1 @icmp_ne_common_op00(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ne_common_op01(i1 %c, i3 %x, i3 %y, i3 %z) {
; CHECK-LABEL: @icmp_ne_common_op01(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i3 [[Y:%.*]], i3 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i3 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i3 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ne i3 %x, %y
@@ -38,7 +38,7 @@ define i1 @icmp_ne_common_op01(i1 %c, i3 %x, i3 %y, i3 %z) {
define i1 @icmp_ne_common_op10(i1 %c, i4 %x, i4 %y, i4 %z) {
; CHECK-LABEL: @icmp_ne_common_op10(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i4 [[Y:%.*]], i4 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i4 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i4 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ne i4 %y, %x
@@ -50,7 +50,7 @@ define i1 @icmp_ne_common_op10(i1 %c, i4 %x, i4 %y, i4 %z) {
define <3 x i1> @icmp_ne_common_op11(<3 x i1> %c, <3 x i17> %x, <3 x i17> %y, <3 x i17> %z) {
; CHECK-LABEL: @icmp_ne_common_op11(
; CHECK-NEXT: [[R_V:%.*]] = select <3 x i1> [[C:%.*]], <3 x i17> [[Y:%.*]], <3 x i17> [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i17> [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i17> [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret <3 x i1> [[R]]
;
%cmp1 = icmp ne <3 x i17> %y, %x
@@ -62,7 +62,7 @@ define <3 x i1> @icmp_ne_common_op11(<3 x i1> %c, <3 x i17> %x, <3 x i17> %y, <3
define i1 @icmp_eq_common_op00(i1 %c, i5 %x, i5 %y, i5 %z) {
; CHECK-LABEL: @icmp_eq_common_op00(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i5 [[Y:%.*]], i5 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i5 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i5 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp eq i5 %x, %y
@@ -74,7 +74,7 @@ define i1 @icmp_eq_common_op00(i1 %c, i5 %x, i5 %y, i5 %z) {
define <5 x i1> @icmp_eq_common_op01(<5 x i1> %c, <5 x i7> %x, <5 x i7> %y, <5 x i7> %z) {
; CHECK-LABEL: @icmp_eq_common_op01(
; CHECK-NEXT: [[R_V:%.*]] = select <5 x i1> [[C:%.*]], <5 x i7> [[Y:%.*]], <5 x i7> [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq <5 x i7> [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq <5 x i7> [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret <5 x i1> [[R]]
;
%cmp1 = icmp eq <5 x i7> %x, %y
@@ -86,7 +86,7 @@ define <5 x i1> @icmp_eq_common_op01(<5 x i1> %c, <5 x i7> %x, <5 x i7> %y, <5 x
define i1 @icmp_eq_common_op10(i1 %c, i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @icmp_eq_common_op10(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i32 [[Y:%.*]], i32 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp eq i32 %y, %x
@@ -98,7 +98,7 @@ define i1 @icmp_eq_common_op10(i1 %c, i32 %x, i32 %y, i32 %z) {
define i1 @icmp_eq_common_op11(i1 %c, i64 %x, i64 %y, i64 %z) {
; CHECK-LABEL: @icmp_eq_common_op11(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp eq i64 %y, %x
@@ -112,7 +112,7 @@ define i1 @icmp_common_one_use_1(i1 %c, i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i8 [[Y]], i8 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[R_V]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp eq i8 %y, %x
@@ -125,7 +125,7 @@ define i1 @icmp_common_one_use_1(i1 %c, i8 %x, i8 %y, i8 %z) {
define i1 @icmp_slt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_slt_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp slt i6 %x, %y
@@ -137,7 +137,7 @@ define i1 @icmp_slt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_sgt_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp sgt i6 %x, %y
@@ -149,7 +149,7 @@ define i1 @icmp_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_sle_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_sle_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp sle i6 %y, %x
@@ -161,7 +161,7 @@ define i1 @icmp_sle_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_sge_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp sge i6 %y, %x
@@ -173,7 +173,7 @@ define i1 @icmp_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_slt_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_slt_sgt_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp slt i6 %x, %y
@@ -185,7 +185,7 @@ define i1 @icmp_slt_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_sle_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_sle_sge_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp sle i6 %y, %x
@@ -197,7 +197,7 @@ define i1 @icmp_sle_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ult_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_ult_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ult i6 %x, %y
@@ -209,7 +209,7 @@ define i1 @icmp_ult_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ule_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_ule_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ule i6 %y, %x
@@ -221,7 +221,7 @@ define i1 @icmp_ule_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ugt_common(i1 %c, i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_ugt_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ugt i8 %y, %x
@@ -233,7 +233,7 @@ define i1 @icmp_ugt_common(i1 %c, i8 %x, i8 %y, i8 %z) {
define i1 @icmp_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_uge_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp uge i6 %y, %x
@@ -245,7 +245,7 @@ define i1 @icmp_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ult_ugt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_ult_ugt_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ult i6 %x, %y
@@ -257,7 +257,7 @@ define i1 @icmp_ult_ugt_common(i1 %c, i6 %x, i6 %y, i6 %z) {
define i1 @icmp_ule_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) {
; CHECK-LABEL: @icmp_ule_uge_common(
; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[R_V]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[X:%.*]], [[R_V]]
; CHECK-NEXT: ret i1 [[R]]
;
%cmp1 = icmp ule i6 %y, %x
diff --git a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
index 59d33ee3b39df..cc8f5d53fdddd 100644
--- a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
+++ b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
@@ -154,10 +154,10 @@ define i32 @select_clz_to_ctz_wrong_sub(i32 %a) {
define i64 @select_clz_to_ctz_i64_wrong_xor(i64 %a) {
; CHECK-LABEL: @select_clz_to_ctz_i64_wrong_xor(
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[A:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[SUB]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[SUB]]
; CHECK-NEXT: [[LZ:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[AND]], i1 true)
-; CHECK-NEXT: [[SUB11:%.*]] = or disjoint i64 [[LZ]], 64
-; CHECK-NEXT: ret i64 [[SUB11]]
+; CHECK-NEXT: [[SUB1:%.*]] = or disjoint i64 [[LZ]], 64
+; CHECK-NEXT: ret i64 [[SUB1]]
;
%sub = sub i64 0, %a
%and = and i64 %sub, %a
@@ -187,7 +187,7 @@ define i64 @select_clz_to_ctz_i64_wrong_icmp_cst(i64 %a) {
define i64 @select_clz_to_ctz_i64_wrong_icmp_pred(i64 %a) {
; CHECK-LABEL: @select_clz_to_ctz_i64_wrong_icmp_pred(
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[A:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[SUB]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[SUB]]
; CHECK-NEXT: [[LZ:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[AND]], i1 true)
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i64 [[A]], 0
; CHECK-NEXT: [[SUB1:%.*]] = xor i64 [[LZ]], 63
@@ -206,7 +206,7 @@ define i64 @select_clz_to_ctz_i64_wrong_icmp_pred(i64 %a) {
define <2 x i32> @select_clz_to_ctz_vec_with_undef(<2 x i32> %a) {
; CHECK-LABEL: @select_clz_to_ctz_vec_with_undef(
; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i32> zeroinitializer, [[A:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[SUB]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A]], [[SUB]]
; CHECK-NEXT: [[LZ:%.*]] = tail call range(i32 0, 33) <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[AND]], i1 true)
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq <2 x i32> [[A]], zeroinitializer
; CHECK-NEXT: [[SUB1:%.*]] = xor <2 x i32> [[LZ]], <i32 31, i32 undef>
@@ -225,7 +225,7 @@ define <2 x i32> @select_clz_to_ctz_vec_with_undef(<2 x i32> %a) {
define i32 @select_clz_to_ctz_wrong_constant_for_zero(i32 %a) {
; CHECK-LABEL: @select_clz_to_ctz_wrong_constant_for_zero(
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[A]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[A]], [[SUB]]
; CHECK-NEXT: [[LZ:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[AND]], i1 false)
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A]], 0
; CHECK-NEXT: [[SUB1:%.*]] = xor i32 [[LZ]], 31
diff --git a/llvm/test/Transforms/InstCombine/select-divrem.ll b/llvm/test/Transforms/InstCombine/select-divrem.ll
index e0c460c37451d..e11afd7b543b2 100644
--- a/llvm/test/Transforms/InstCombine/select-divrem.ll
+++ b/llvm/test/Transforms/InstCombine/select-divrem.ll
@@ -311,7 +311,7 @@ define i8 @rem_euclid_non_const_pow2(i8 %0, i8 %1) {
; CHECK-LABEL: @rem_euclid_non_const_pow2(
; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[TMP0:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = xor i8 [[NOTMASK]], -1
-; CHECK-NEXT: [[SEL:%.*]] = and i8 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = and i8 [[TMP1:%.*]], [[TMP3]]
; CHECK-NEXT: ret i8 [[SEL]]
;
%pow2 = shl i8 1, %0
diff --git a/llvm/test/Transforms/InstCombine/select-factorize.ll b/llvm/test/Transforms/InstCombine/select-factorize.ll
index 386c8e522759e..ab9d9f6b24754 100644
--- a/llvm/test/Transforms/InstCombine/select-factorize.ll
+++ b/llvm/test/Transforms/InstCombine/select-factorize.ll
@@ -230,7 +230,7 @@ define i1 @and_logic_and_logic_or_5(i1 %c, i1 %a, i1 %b) {
define i1 @and_logic_and_logic_or_6(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @and_logic_and_logic_or_6(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = and i1 %c, %a
@@ -254,7 +254,7 @@ define i1 @and_logic_and_logic_or_7(i1 %c, i1 %a, i1 %b) {
define i1 @and_logic_and_logic_or_8(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @and_logic_and_logic_or_8(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = and i1 %a, %c
@@ -319,7 +319,7 @@ define i1 @and_logic_and_logic_or_not_one_use(i1 %c, i1 %a, i1 %b) {
define i1 @and_and_logic_or_1(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @and_and_logic_or_1(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[B:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = and i1 %c, %a
@@ -331,7 +331,7 @@ define i1 @and_and_logic_or_1(i1 %c, i1 %a, i1 %b) {
define i1 @and_and_logic_or_2(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @and_and_logic_or_2(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = and i1 %a, %c
@@ -343,7 +343,7 @@ define i1 @and_and_logic_or_2(i1 %c, i1 %a, i1 %b) {
define <3 x i1> @and_and_logic_or_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) {
; CHECK-LABEL: @and_and_logic_or_vector(
; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> <i1 true, i1 true, i1 true>, <3 x i1> [[B:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret <3 x i1> [[OR]]
;
%ac = and <3 x i1> %c, %a
@@ -355,7 +355,7 @@ define <3 x i1> @and_and_logic_or_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b)
define <3 x i1> @and_and_logic_or_vector_poison(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) {
; CHECK-LABEL: @and_and_logic_or_vector_poison(
; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> <i1 true, i1 true, i1 true>, <3 x i1> [[B:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret <3 x i1> [[OR]]
;
%ac = and <3 x i1> %c, %a
@@ -584,7 +584,7 @@ define i1 @or_logic_or_logic_and_3(i1 %c, i1 %a, i1 %b) {
define i1 @or_logic_or_logic_and_4(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @or_logic_or_logic_and_4(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = or i1 %c, %a
@@ -632,7 +632,7 @@ define i1 @or_logic_or_logic_and_7(i1 %c, i1 %a, i1 %b) {
define i1 @or_logic_or_logic_and_8(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @or_logic_or_logic_and_8(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = or i1 %a, %c
@@ -697,7 +697,7 @@ define i1 @or_logic_or_logic_and_not_one_use(i1 %c, i1 %a, i1 %b) {
define i1 @or_or_logic_and_1(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @or_or_logic_and_1(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = or i1 %c, %a
@@ -709,7 +709,7 @@ define i1 @or_or_logic_and_1(i1 %c, i1 %a, i1 %b) {
define i1 @or_or_logic_and_2(i1 %c, i1 %a, i1 %b) {
; CHECK-LABEL: @or_or_logic_and_2(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[OR]]
;
%ac = or i1 %c, %a
@@ -721,7 +721,7 @@ define i1 @or_or_logic_and_2(i1 %c, i1 %a, i1 %b) {
define <3 x i1> @or_or_logic_and_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) {
; CHECK-LABEL: @or_or_logic_and_vector(
; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> [[B:%.*]], <3 x i1> zeroinitializer
-; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret <3 x i1> [[OR]]
;
%ac = or <3 x i1> %c, %a
@@ -733,7 +733,7 @@ define <3 x i1> @or_or_logic_and_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) {
define <3 x i1> @or_or_logic_and_vector_poison(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) {
; CHECK-LABEL: @or_or_logic_and_vector_poison(
; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> [[B:%.*]], <3 x i1> zeroinitializer
-; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[C:%.*]], [[TMP1]]
; CHECK-NEXT: ret <3 x i1> [[OR]]
;
%ac = or <3 x i1> %c, %a
diff --git a/llvm/test/Transforms/InstCombine/select-masked_gather.ll b/llvm/test/Transforms/InstCombine/select-masked_gather.ll
index 70d798ecd5085..a232bdbca0df4 100644
--- a/llvm/test/Transforms/InstCombine/select-masked_gather.ll
+++ b/llvm/test/Transforms/InstCombine/select-masked_gather.ll
@@ -95,7 +95,7 @@ define <vscale x 2 x i32> @masked_gather_and_zero_inactive_7(<vscale x 2 x ptr>
define <vscale x 2 x float> @masked_gather_and_zero_inactive_8(<vscale x 2 x ptr> %ptr, <vscale x 2 x i1> %inv_mask, <vscale x 2 x i1> %cond) {
; CHECK-LABEL: @masked_gather_and_zero_inactive_8(
; CHECK-NEXT: [[MASK:%.*]] = xor <vscale x 2 x i1> [[INV_MASK:%.*]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer)
-; CHECK-NEXT: [[PG:%.*]] = and <vscale x 2 x i1> [[MASK]], [[COND:%.*]]
+; CHECK-NEXT: [[PG:%.*]] = and <vscale x 2 x i1> [[COND:%.*]], [[MASK]]
; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[PTR:%.*]], i32 4, <vscale x 2 x i1> [[PG]], <vscale x 2 x float> zeroinitializer)
; CHECK-NEXT: ret <vscale x 2 x float> [[GATHER]]
;
diff --git a/llvm/test/Transforms/InstCombine/select-masked_load.ll b/llvm/test/Transforms/InstCombine/select-masked_load.ll
index 0e82def113e96..51525e5ee8346 100644
--- a/llvm/test/Transforms/InstCombine/select-masked_load.ll
+++ b/llvm/test/Transforms/InstCombine/select-masked_load.ll
@@ -92,7 +92,7 @@ define <4 x i32> @masked_load_and_zero_inactive_7(ptr %ptr, <4 x i1> %mask1, <4
define <4 x float> @masked_load_and_zero_inactive_8(ptr %ptr, <4 x i1> %inv_mask, <4 x i1> %cond) {
; CHECK-LABEL: @masked_load_and_zero_inactive_8(
; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[PG:%.*]] = and <4 x i1> [[MASK]], [[COND:%.*]]
+; CHECK-NEXT: [[PG:%.*]] = and <4 x i1> [[COND:%.*]], [[MASK]]
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[PG]], <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> [[LOAD]]
;
diff --git a/llvm/test/Transforms/InstCombine/select-of-bittest.ll b/llvm/test/Transforms/InstCombine/select-of-bittest.ll
index e3eb76de459e2..f1f53c3ee7d30 100644
--- a/llvm/test/Transforms/InstCombine/select-of-bittest.ll
+++ b/llvm/test/Transforms/InstCombine/select-of-bittest.ll
@@ -158,7 +158,7 @@ define <3 x i32> @and_and_vec_poison(<3 x i32> %arg) {
define i32 @f_var0(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @f_var0(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[T5:%.*]] = zext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[T5]]
@@ -175,7 +175,7 @@ define i32 @f_var0(i32 %arg, i32 %arg1) {
define i32 @f_var0_commutative_and(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @f_var0_commutative_and(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[T5:%.*]] = zext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[T5]]
@@ -191,7 +191,7 @@ define i32 @f_var0_commutative_and(i32 %arg, i32 %arg1) {
define <2 x i32> @f_var0_splatvec(<2 x i32> %arg, <2 x i32> %arg1) {
; CHECK-LABEL: @f_var0_splatvec(
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], <i32 2, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[T5:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T5]]
@@ -207,7 +207,7 @@ define <2 x i32> @f_var0_splatvec(<2 x i32> %arg, <2 x i32> %arg1) {
define <2 x i32> @f_var0_vec(<2 x i32> %arg, <2 x i32> %arg1) {
; CHECK-LABEL: @f_var0_vec(
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], <i32 2, i32 4>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[T5:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T5]]
@@ -223,7 +223,7 @@ define <2 x i32> @f_var0_vec(<2 x i32> %arg, <2 x i32> %arg1) {
define <3 x i32> @f_var0_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) {
; CHECK-LABEL: @f_var0_vec_poison(
; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], <i32 2, i32 poison, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[T5:%.*]] = zext <3 x i1> [[TMP3]] to <3 x i32>
; CHECK-NEXT: ret <3 x i32> [[T5]]
@@ -240,7 +240,7 @@ define <3 x i32> @f_var0_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) {
define i32 @f_var1(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @f_var1(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[T4:%.*]] = zext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[T4]]
@@ -256,7 +256,7 @@ define i32 @f_var1(i32 %arg, i32 %arg1) {
define i32 @f_var1_commutative_and(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @f_var1_commutative_and(
; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[T4:%.*]] = zext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[T4]]
@@ -271,7 +271,7 @@ define i32 @f_var1_commutative_and(i32 %arg, i32 %arg1) {
define <2 x i32> @f_var1_vec(<2 x i32> %arg, <2 x i32> %arg1) {
; CHECK-LABEL: @f_var1_vec(
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], <i32 1, i32 1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[T4:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32>
; CHECK-NEXT: ret <2 x i32> [[T4]]
@@ -286,7 +286,7 @@ define <2 x i32> @f_var1_vec(<2 x i32> %arg, <2 x i32> %arg1) {
define <3 x i32> @f_var1_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) {
; CHECK-LABEL: @f_var1_vec_poison(
; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], <i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[ARG:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[T4:%.*]] = zext <3 x i1> [[TMP3]] to <3 x i32>
; CHECK-NEXT: ret <3 x i32> [[T4]]
diff --git a/llvm/test/Transforms/InstCombine/select-safe-transforms.ll b/llvm/test/Transforms/InstCombine/select-safe-transforms.ll
index f0072e24161d4..19dfa5d641993 100644
--- a/llvm/test/Transforms/InstCombine/select-safe-transforms.ll
+++ b/llvm/test/Transforms/InstCombine/select-safe-transforms.ll
@@ -194,7 +194,7 @@ define i1 @andn_or_cmp_2_logical(i16 %a, i16 %b, i1 %y) {
define i1 @andn_or_cmp_2_partial_logical(i16 %a, i16 %b, i1 %y) {
; CHECK-LABEL: @andn_or_cmp_2_partial_logical(
; CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i1 [[X_INV]], [[Y:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[Y:%.*]], [[X_INV]]
; CHECK-NEXT: ret i1 [[AND]]
;
%x = icmp sge i16 %a, %b
@@ -735,7 +735,7 @@ define i1 @orn_and_cmp_2_logical(i16 %a, i16 %b, i1 %y) {
define i1 @orn_and_cmp_2_partial_logical(i16 %a, i16 %b, i1 %y) {
; CHECK-LABEL: @orn_and_cmp_2_partial_logical(
; CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i1 [[X_INV]], [[Y:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[Y:%.*]], [[X_INV]]
; CHECK-NEXT: ret i1 [[OR]]
;
%x = icmp sge i16 %a, %b
diff --git a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
index 416a6d71055b6..1647233595b37 100644
--- a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
@@ -10,7 +10,7 @@ define i32 @select_icmp_eq_and_1_0_or_2(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2(
; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 1
@@ -24,7 +24,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec(
; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], <i32 1, i32 1>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 1, i32 1>
@@ -38,7 +38,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison1(<2 x i32> %x, <2 x i32
; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison1(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 1, i32 poison>
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw <2 x i32> [[AND]], <i32 1, i32 1>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 1, i32 poison>
@@ -52,7 +52,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison2(<2 x i32> %x, <2 x i32
; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison2(
; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], <i32 1, i32 1>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 1, i32 1>
@@ -66,7 +66,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison3(<2 x i32> %x, <2 x i32
; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison3(
; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], <i32 1, i32 1>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 1, i32 1>
@@ -80,7 +80,7 @@ define i32 @select_icmp_eq_and_1_0_xor_2(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_1_0_xor_2(
; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 1
@@ -109,7 +109,7 @@ define i32 @select_icmp_eq_and_32_0_or_8(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_32_0_or_8(
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 8
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 32
@@ -123,7 +123,7 @@ define <2 x i32> @select_icmp_eq_and_32_0_or_8_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @select_icmp_eq_and_32_0_or_8_vec(
; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 2, i32 2>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 8, i32 8>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 32, i32 32>
@@ -137,7 +137,7 @@ define i32 @select_icmp_eq_and_32_0_xor_8(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_32_0_xor_8(
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 2
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 8
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 32
@@ -166,7 +166,7 @@ define i32 @select_icmp_ne_0_and_4096_or_4096(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_ne_0_and_4096_or_4096(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 4096
@@ -180,7 +180,7 @@ define <2 x i32> @select_icmp_ne_0_and_4096_or_4096_vec(<2 x i32> %x, <2 x i32>
; CHECK-LABEL: @select_icmp_ne_0_and_4096_or_4096_vec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 4096, i32 4096>
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[AND]], <i32 4096, i32 4096>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 4096, i32 4096>
@@ -222,7 +222,7 @@ define i32 @select_icmp_ne_0_and_4096_and_not_4096(i32 %x, i32 %y) {
define i32 @select_icmp_eq_and_4096_0_or_4096(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_4096_0_or_4096(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[AND]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 4096
@@ -235,7 +235,7 @@ define i32 @select_icmp_eq_and_4096_0_or_4096(i32 %x, i32 %y) {
define <2 x i32> @select_icmp_eq_and_4096_0_or_4096_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @select_icmp_eq_and_4096_0_or_4096_vec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 4096, i32 4096>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[AND]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 4096, i32 4096>
@@ -248,7 +248,7 @@ define <2 x i32> @select_icmp_eq_and_4096_0_or_4096_vec(<2 x i32> %x, <2 x i32>
define i32 @select_icmp_eq_and_4096_0_xor_4096(i32 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_and_4096_0_xor_4096(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[AND]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 4096
@@ -277,7 +277,7 @@ define i32 @select_icmp_eq_0_and_1_or_1(i64 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_0_and_1_or_1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i64 %x, 1
@@ -291,7 +291,7 @@ define <2 x i32> @select_icmp_eq_0_and_1_or_1_vec(<2 x i64> %x, <2 x i32> %y) {
; CHECK-LABEL: @select_icmp_eq_0_and_1_or_1_vec(
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[X:%.*]] to <2 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 1, i32 1>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i64> %x, <i64 1, i64 1>
@@ -305,7 +305,7 @@ define i32 @select_icmp_eq_0_and_1_xor_1(i64 %x, i32 %y) {
; CHECK-LABEL: @select_icmp_eq_0_and_1_xor_1(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i64 %x, 1
@@ -335,7 +335,7 @@ define i32 @select_icmp_ne_0_and_4096_or_32(i32 %x, i32 %y) {
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 7
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 32
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 32
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 4096
@@ -380,7 +380,7 @@ define i32 @select_icmp_ne_0_and_32_or_4096(i32 %x, i32 %y) {
; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 7
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 4096
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 32
@@ -395,7 +395,7 @@ define <2 x i32> @select_icmp_ne_0_and_32_or_4096_vec(<2 x i32> %x, <2 x i32> %y
; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], <i32 7, i32 7>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 4096, i32 4096>
; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], <i32 4096, i32 4096>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 32, i32 32>
@@ -570,7 +570,7 @@ define i64 @select_icmp_x_and_8_eq_0_y_xor_8(i32 %x, i64 %y) {
; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_xor_8(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
-; CHECK-NEXT: [[Y_XOR:%.*]] = xor i64 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[Y_XOR:%.*]] = xor i64 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i64 [[Y_XOR]]
;
%and = and i32 %x, 8
@@ -585,7 +585,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_xor_8(i32 %x, i64 %y) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 8
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[XOR_Y:%.*]] = xor i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[XOR_Y:%.*]] = xor i64 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i64 [[XOR_Y]]
;
%and = and i32 %x, 8
@@ -600,7 +600,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_or_8(i32 %x, i64 %y) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 8
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[OR_Y:%.*]] = or i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[OR_Y:%.*]] = or i64 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i64 [[OR_Y]]
;
%and = and i32 %x, 8
@@ -615,7 +615,7 @@ define <2 x i64> @select_icmp_x_and_8_ne_0_y_or_8_vec(<2 x i32> %x, <2 x i64> %y
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 8, i32 8>
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[AND]], <i32 8, i32 8>
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[OR_Y:%.*]] = or <2 x i64> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[OR_Y:%.*]] = or <2 x i64> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret <2 x i64> [[OR_Y]]
;
%and = and <2 x i32> %x, <i32 8, i32 8>
@@ -680,7 +680,7 @@ define i32 @test68(i32 %x, i32 %y) {
; CHECK-LABEL: @test68(
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 128
@@ -694,7 +694,7 @@ define <2 x i32> @test68vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @test68vec(
; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 128, i32 128>
@@ -708,7 +708,7 @@ define i32 @test68_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test68_xor(
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 128
@@ -738,7 +738,7 @@ define i32 @test69(i32 %x, i32 %y) {
; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i32 [[SELECT]]
;
%and = and i32 %x, 128
@@ -753,7 +753,7 @@ define <2 x i32> @test69vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], <i32 2, i32 2>
-; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret <2 x i32> [[SELECT]]
;
%and = and <2 x i32> %x, <i32 128, i32 128>
@@ -797,7 +797,7 @@ define i8 @test70(i8 %x, i8 %y) {
; CHECK-LABEL: @test70(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = or i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i8 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i8 [[SELECT]]
;
%cmp = icmp slt i8 %x, 0
@@ -826,7 +826,7 @@ define i32 @shift_no_xor_multiuse_or(i32 %x, i32 %y) {
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 2
; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[TMP1]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -843,7 +843,7 @@ define i32 @shift_no_xor_multiuse_xor(i32 %x, i32 %y) {
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2
; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[TMP1]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[XOR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -876,7 +876,7 @@ define i32 @no_shift_no_xor_multiuse_or(i32 %x, i32 %y) {
; CHECK-LABEL: @no_shift_no_xor_multiuse_or(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[AND]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -892,7 +892,7 @@ define i32 @no_shift_no_xor_multiuse_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @no_shift_no_xor_multiuse_xor(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[AND]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[XOR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -926,7 +926,7 @@ define i32 @no_shift_xor_multiuse_or(i32 %x, i32 %y) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[TMP1]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -1028,7 +1028,7 @@ define i32 @shift_no_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i32 [[AND]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
@@ -1047,7 +1047,7 @@ define i32 @shift_no_xor_multiuse_cmp_with_xor(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i32 [[AND]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
@@ -1084,7 +1084,7 @@ define i32 @no_shift_no_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-LABEL: @no_shift_no_xor_multiuse_cmp(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[AND]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
@@ -1102,7 +1102,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_with_xor(i32 %x, i32 %y, i32 %z, i32 %w
; CHECK-LABEL: @no_shift_no_xor_multiuse_cmp_with_xor(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[AND]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
@@ -1140,7 +1140,7 @@ define i32 @no_shift_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP_NOT]], i32 [[W:%.*]], i32 [[Z:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: ret i32 [[RES]]
@@ -1317,7 +1317,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_or(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[AND]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: [[RES2:%.*]] = mul i32 [[RES]], [[OR]]
@@ -1338,7 +1338,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_xor(i32 %x, i32 %y, i32 %z, i32 %w) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[AND]]
; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
; CHECK-NEXT: [[RES2:%.*]] = mul i32 [[RES]], [[XOR]]
@@ -1641,7 +1641,7 @@ define i64 @xor_i8_to_i64_shl_save_and_ne(i8 %x, i64 %y) {
; CHECK-LABEL: @xor_i8_to_i64_shl_save_and_ne(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 63
-; CHECK-NEXT: [[R:%.*]] = xor i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i64 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i64 [[R]]
;
%xx = and i8 %x, 1
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 2ade6faa99be3..90a3822294260 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -221,7 +221,7 @@ define i32 @test11(i32 %a) {
define i32 @test12(i1 %cond, i32 %a) {
; CHECK-LABEL: @test12(
; CHECK-NEXT: [[B:%.*]] = zext i1 [[COND:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = or i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i32 [[A:%.*]], [[B]]
; CHECK-NEXT: ret i32 [[C]]
;
%b = or i32 %a, 1
@@ -232,7 +232,7 @@ define i32 @test12(i1 %cond, i32 %a) {
define <2 x i32> @test12vec(<2 x i1> %cond, <2 x i32> %a) {
; CHECK-LABEL: @test12vec(
; CHECK-NEXT: [[B:%.*]] = zext <2 x i1> [[COND:%.*]] to <2 x i32>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i32> [[B]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i32> [[A:%.*]], [[B]]
; CHECK-NEXT: ret <2 x i32> [[C]]
;
%b = or <2 x i32> %a, <i32 1, i32 1>
@@ -686,7 +686,7 @@ define i1 @test40(i1 %cond) {
define i32 @test41(i1 %cond, i32 %x, i32 %y) {
; CHECK-LABEL: @test41(
-; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
%z = and i32 %x, %y
@@ -699,7 +699,7 @@ define i32 @test42(i32 %x, i32 %y) {
; CHECK-LABEL: @test42(
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT: [[B:%.*]] = sext i1 [[COND]] to i32
-; CHECK-NEXT: [[C:%.*]] = add i32 [[B]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[Y:%.*]], [[B]]
; CHECK-NEXT: ret i32 [[C]]
;
%b = add i32 %y, -1
@@ -712,7 +712,7 @@ define <2 x i32> @test42vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @test42vec(
; CHECK-NEXT: [[COND:%.*]] = icmp eq <2 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT: [[B:%.*]] = sext <2 x i1> [[COND]] to <2 x i32>
-; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[Y:%.*]], [[B]]
; CHECK-NEXT: ret <2 x i32> [[C]]
;
%b = add <2 x i32> %y, <i32 -1, i32 -1>
@@ -1569,7 +1569,7 @@ define i8 @test88(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
; select(C, Z, binop(W, select(C, X, Y))) -> select(C, binop(X, W), Z)
define i8 @test89(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test89(
-; CHECK-NEXT: [[B:%.*]] = and i8 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT: [[B:%.*]] = and i8 [[W:%.*]], [[X:%.*]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], i8 [[B]], i8 [[Z:%.*]]
; CHECK-NEXT: ret i8 [[C]]
;
@@ -1582,7 +1582,7 @@ define i8 @test89(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
; select(C, Z, binop(W, select(C, X, Y))) -> select(C, Z, binop(W, Y))
define i8 @test90(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test90(
-; CHECK-NEXT: [[B:%.*]] = or i8 [[Y:%.*]], [[W:%.*]]
+; CHECK-NEXT: [[B:%.*]] = or i8 [[W:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], i8 [[Z:%.*]], i8 [[B]]
; CHECK-NEXT: ret i8 [[C]]
;
@@ -2911,7 +2911,7 @@ define i8 @select_replacement_loop3(i32 noundef %x) {
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT: [[REV:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[TRUNC]])
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[REV]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[EXT]], [[X]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[EXT]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 [[TRUNC]], i8 0
; CHECK-NEXT: ret i8 [[SEL]]
;
@@ -2952,7 +2952,7 @@ define ptr @select_replacement_gep_inbounds(ptr %base, i64 %offset) {
define i8 @replace_false_op_eq_shl_or_disjoint(i8 %x) {
; CHECK-LABEL: @replace_false_op_eq_shl_or_disjoint(
; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], 3
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[SHL]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X]], [[SHL]]
; CHECK-NEXT: ret i8 [[OR]]
;
%eq0 = icmp eq i8 %x, -1
@@ -2993,7 +2993,7 @@ define <2 x i1> @partial_false_undef_condval(<2 x i1> %x) {
define i32 @mul_select_eq_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_select_eq_zero(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: ret i32 [[M]]
;
%c = icmp eq i32 %x, 0
@@ -3019,7 +3019,7 @@ define i32 @mul_select_eq_zero_commute(i32 %x, i32 %y) {
define i32 @mul_select_eq_zero_copy_flags(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_select_eq_zero_copy_flags(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i32 [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: ret i32 [[M]]
;
%c = icmp eq i32 %x, 0
@@ -3034,7 +3034,7 @@ define i32 @mul_select_ne_zero(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_select_ne_zero(
; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], 0
; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X]], [[Y_FR]]
; CHECK-NEXT: call void @use(i1 [[C]])
; CHECK-NEXT: ret i32 [[M]]
;
@@ -3051,7 +3051,7 @@ define i32 @mul_select_ne_zero(i32 %x, i32 %y) {
define i32 @mul_select_eq_zero_sel_undef(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_select_eq_zero_sel_undef(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: ret i32 [[M]]
;
%c = icmp eq i32 %x, 0
@@ -3065,7 +3065,7 @@ define i32 @mul_select_eq_zero_sel_undef(i32 %x, i32 %y) {
define i32 @mul_select_eq_zero_multiple_users(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_select_eq_zero_multiple_users(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: call void @use_i32(i32 [[M]])
; CHECK-NEXT: call void @use_i32(i32 [[M]])
; CHECK-NEXT: call void @use_i32(i32 [[M]])
@@ -3099,7 +3099,7 @@ define i32 @mul_select_eq_zero_unrelated_condition(i32 %x, i32 %y, i32 %z) {
define <4 x i32> @mul_select_eq_zero_vector(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @mul_select_eq_zero_vector(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze <4 x i32> [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul <4 x i32> [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul <4 x i32> [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: ret <4 x i32> [[M]]
;
%c = icmp eq <4 x i32> %x, zeroinitializer
@@ -3130,7 +3130,7 @@ define <2 x i32> @mul_select_eq_poison_vector(<2 x i32> %x, <2 x i32> %y) {
define <2 x i32> @mul_select_eq_zero_sel_poison_vector(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @mul_select_eq_zero_sel_poison_vector(
; CHECK-NEXT: [[Y_FR:%.*]] = freeze <2 x i32> [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[X:%.*]], [[Y_FR]]
; CHECK-NEXT: ret <2 x i32> [[M]]
;
%c = icmp eq <2 x i32> %x, zeroinitializer
@@ -3968,7 +3968,7 @@ define i32 @src_or_eq_C_and_andnotxorC(i32 %x, i32 %y, i32 %c) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[TMP0]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT]], [[C:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C:%.*]], [[NOT]]
; CHECK-NEXT: ret i32 [[AND1]]
;
entry:
@@ -4004,7 +4004,7 @@ define i32 @src_or_eq_C_xor_andnotandC(i32 %x, i32 %y, i32 %c) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[AND]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT]], [[C:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C:%.*]], [[NOT]]
; CHECK-NEXT: ret i32 [[AND1]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/select_meta.ll b/llvm/test/Transforms/InstCombine/select_meta.ll
index 3898fd9fa1f57..d8f945b8d1b32 100644
--- a/llvm/test/Transforms/InstCombine/select_meta.ll
+++ b/llvm/test/Transforms/InstCombine/select_meta.ll
@@ -6,7 +6,7 @@ define i32 @foo(i32) local_unnamed_addr #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP0:%.*]], 2
; CHECK-NEXT: [[DOTV:%.*]] = select i1 [[TMP2]], i32 20, i32 -20, !prof [[PROF0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[DOTV]], [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP0]], [[DOTV]]
; CHECK-NEXT: ret i32 [[TMP3]]
;
%2 = icmp sgt i32 %0, 2
@@ -51,7 +51,7 @@ define i32 @foo2(i32, i32) local_unnamed_addr #0 {
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[TMP0:%.*]], 2
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 0, [[TMP1:%.*]]
; CHECK-NEXT: [[DOTP:%.*]] = select i1 [[TMP3]], i32 [[TMP1]], i32 [[TMP4]], !prof [[PROF0]]
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[DOTP]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP0]], [[DOTP]]
; CHECK-NEXT: ret i32 [[TMP5]]
;
%3 = icmp sgt i32 %0, 2
@@ -317,7 +317,7 @@ define <2 x i32> @not_cond_vec_poison(<2 x i1> %c, <2 x i32> %tv, <2 x i32> %fv)
define i64 @select_add(i1 %cond, i64 %x, i64 %y) {
; CHECK-LABEL: @select_add(
; CHECK-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i64 [[Y:%.*]], i64 0, !prof [[PROF0]], !unpredictable [[META2:![0-9]+]]
-; CHECK-NEXT: [[RET:%.*]] = add i64 [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = add i64 [[X:%.*]], [[OP]]
; CHECK-NEXT: ret i64 [[RET]]
;
%op = add i64 %x, %y
@@ -328,7 +328,7 @@ define i64 @select_add(i1 %cond, i64 %x, i64 %y) {
define <2 x i32> @select_or(<2 x i1> %cond, <2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @select_or(
; CHECK-NEXT: [[OP:%.*]] = select <2 x i1> [[COND:%.*]], <2 x i32> [[Y:%.*]], <2 x i32> zeroinitializer, !prof [[PROF0]], !unpredictable [[META2]]
-; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[X:%.*]], [[OP]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%op = or <2 x i32> %x, %y
@@ -361,7 +361,7 @@ define i128 @select_ashr(i1 %cond, i128 %x, i128 %y) {
define double @select_fmul(i1 %cond, double %x, double %y) {
; CHECK-LABEL: @select_fmul(
; CHECK-NEXT: [[OP:%.*]] = select nnan i1 [[COND:%.*]], double [[Y:%.*]], double 1.000000e+00, !prof [[PROF0]], !unpredictable [[META2]]
-; CHECK-NEXT: [[RET:%.*]] = fmul double [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = fmul double [[X:%.*]], [[OP]]
; CHECK-NEXT: ret double [[RET]]
;
%op = fmul double %x, %y
diff --git a/llvm/test/Transforms/InstCombine/set.ll b/llvm/test/Transforms/InstCombine/set.ll
index 50329ddf7caac..f44ac83f7f591 100644
--- a/llvm/test/Transforms/InstCombine/set.ll
+++ b/llvm/test/Transforms/InstCombine/set.ll
@@ -135,7 +135,7 @@ define i1 @test12(i1 %A) {
define i1 @test13(i1 %A, i1 %B) {
; CHECK-LABEL: @test13(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[C:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%C = icmp uge i1 %A, %B
@@ -145,7 +145,7 @@ define i1 @test13(i1 %A, i1 %B) {
define <2 x i1> @test13vec(<2 x i1> %A, <2 x i1> %B) {
; CHECK-LABEL: @test13vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[B:%.*]], <i1 true, i1 true>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%C = icmp uge <2 x i1> %A, %B
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 7f948848844c5..016f877a9efb5 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -505,7 +505,7 @@ define i2 @ashr_2_add_zext_basic(i1 %a, i1 %b) {
define i32 @lshr_16_add_zext_basic(i16 %a, i16 %b) {
; CHECK-LABEL: @lshr_16_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
; CHECK-NEXT: ret i32 [[LSHR]]
;
@@ -566,7 +566,7 @@ define i32 @lshr_16_add_not_known_16_leading_zeroes(i32 %a, i32 %b) {
define i64 @lshr_32_add_zext_basic(i32 %a, i32 %b) {
; CHECK-LABEL: @lshr_32_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
; CHECK-NEXT: ret i64 [[LSHR]]
;
@@ -623,7 +623,7 @@ define i64 @lshr_33_i32_add_zext_basic(i32 %a, i32 %b) {
define i64 @lshr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
; CHECK-LABEL: @lshr_16_to_64_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
; CHECK-NEXT: ret i64 [[LSHR]]
;
@@ -668,7 +668,7 @@ define i64 @lshr_32_add_not_known_32_leading_zeroes(i64 %a, i64 %b) {
define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
; CHECK-LABEL: @ashr_16_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
; CHECK-NEXT: ret i32 [[LSHR]]
;
@@ -682,7 +682,7 @@ define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
; CHECK-LABEL: @ashr_32_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
; CHECK-NEXT: ret i64 [[LSHR]]
;
@@ -696,7 +696,7 @@ define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
define i64 @ashr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
; CHECK-LABEL: @ashr_16_to_64_add_zext_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
; CHECK-NEXT: ret i64 [[LSHR]]
;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
index a0a3c8edfb4b5..c4260f4cb2bf8 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
@@ -294,7 +294,7 @@ define i1 @t10_almost_highest_bit(i32 %x, i64 %y, i32 %len) {
define i1 @t11_no_shift(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t11_no_shift(
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[T5]]
;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
index 3a85f19d8a037..6e9552e2af4cc 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
@@ -17,7 +17,7 @@ define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
; CHECK-LABEL: @t0_const_after_fold_lshr_shl_ne(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: ret i1 [[T5]]
;
@@ -40,7 +40,7 @@ define <2 x i1> @t1_vec_splat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 31, i32 31>
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: [[T5:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[T5]]
;
@@ -212,7 +212,7 @@ define i1 @t6_oneuse3(i32 %x, i64 %y, i32 %len) {
; CHECK-NEXT: call void @use64(i64 [[T3]])
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: ret i1 [[T5]]
;
@@ -244,7 +244,7 @@ define i1 @t7_oneuse4(i32 %x, i64 %y, i32 %len) {
; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: ret i1 [[T5]]
;
diff --git a/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll b/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
index a8f4644f1ae42..ebb53e36a3f21 100644
--- a/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
+++ b/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
@@ -239,7 +239,7 @@ define i1 @t13_shift_of_const1(i32 %x, i32 %y, i32 %z) {
define i1 @t14_and_with_const0(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @t14_and_with_const0(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[T2]]
;
@@ -251,7 +251,7 @@ define i1 @t14_and_with_const0(i32 %x, i32 %y, i32 %z) {
define i1 @t15_and_with_const1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @t15_and_with_const1(
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[T2]]
;
diff --git a/llvm/test/Transforms/InstCombine/shift-logic.ll b/llvm/test/Transforms/InstCombine/shift-logic.ll
index b591400c6a260..2abd8dd078fd9 100644
--- a/llvm/test/Transforms/InstCombine/shift-logic.ll
+++ b/llvm/test/Transforms/InstCombine/shift-logic.ll
@@ -189,7 +189,7 @@ define i32 @ashr_xor(i32 %x, i32 %py) {
define i32 @shr_mismatch_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @shr_mismatch_xor(
; CHECK-NEXT: [[SH0:%.*]] = ashr i32 [[X:%.*]], 5
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[Y:%.*]], [[SH0]]
; CHECK-NEXT: [[SH1:%.*]] = lshr i32 [[R]], 7
; CHECK-NEXT: ret i32 [[SH1]]
;
@@ -202,7 +202,7 @@ define i32 @shr_mismatch_xor(i32 %x, i32 %y) {
define i32 @ashr_overshift_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @ashr_overshift_xor(
; CHECK-NEXT: [[SH0:%.*]] = ashr i32 [[X:%.*]], 15
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[Y:%.*]], [[SH0]]
; CHECK-NEXT: [[SH1:%.*]] = ashr i32 [[R]], 17
; CHECK-NEXT: ret i32 [[SH1]]
;
@@ -215,7 +215,7 @@ define i32 @ashr_overshift_xor(i32 %x, i32 %y) {
define <2 x i32> @ashr_poison_poison_xor(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @ashr_poison_poison_xor(
; CHECK-NEXT: [[SH0:%.*]] = ashr <2 x i32> [[X:%.*]], <i32 15, i32 poison>
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i32> [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor <2 x i32> [[Y:%.*]], [[SH0]]
; CHECK-NEXT: [[SH1:%.*]] = ashr <2 x i32> [[R]], <i32 poison, i32 17>
; CHECK-NEXT: ret <2 x i32> [[SH1]]
;
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index 8da52e0746373..9626f6bc23164 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1692,7 +1692,7 @@ define i177 @lshr_out_of_range(i177 %Y, ptr %A2, ptr %ptr) {
; CHECK-LABEL: @lshr_out_of_range(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i177 [[Y:%.*]], -1
; CHECK-NEXT: [[B4:%.*]] = sext i1 [[TMP1]] to i177
-; CHECK-NEXT: [[C8:%.*]] = icmp ult i177 [[B4]], [[Y]]
+; CHECK-NEXT: [[C8:%.*]] = icmp ugt i177 [[Y]], [[B4]]
; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[C8]] to i64
; CHECK-NEXT: [[G18:%.*]] = getelementptr ptr, ptr [[A2:%.*]], i64 [[TMP2]]
; CHECK-NEXT: store ptr [[G18]], ptr [[PTR:%.*]], align 8
@@ -1810,7 +1810,7 @@ define void @ossfuzz_38078(i32 %arg, i32 %arg1, ptr %ptr, ptr %ptr2, ptr %ptr3,
; CHECK-NEXT: bb:
; CHECK-NEXT: [[G1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 -4
; CHECK-NEXT: [[I2:%.*]] = sub i32 0, [[ARG1:%.*]]
-; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], [[ARG:%.*]]
+; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[ARG:%.*]], [[I2]]
; CHECK-NEXT: call void @llvm.assume(i1 [[I5]])
; CHECK-NEXT: store volatile i32 2147483647, ptr [[G1]], align 4
; CHECK-NEXT: br label [[BB:%.*]]
@@ -2047,7 +2047,7 @@ define i32 @ashr_sdiv_extra_use(i32 %x) {
define i32 @shl1_cttz(i32 %x) {
; CHECK-LABEL: @shl1_cttz(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and i32 [[X]], [[NEG]]
; CHECK-NEXT: ret i32 [[SHL]]
;
%tz = call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -2058,7 +2058,7 @@ define i32 @shl1_cttz(i32 %x) {
define <2 x i8> @shl1_cttz_vec(<2 x i8> %x) {
; CHECK-LABEL: @shl1_cttz_vec(
; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[X]], [[NEG]]
; CHECK-NEXT: ret <2 x i8> [[SHL]]
;
%tz = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
@@ -2069,7 +2069,7 @@ define <2 x i8> @shl1_cttz_vec(<2 x i8> %x) {
define <2 x i8> @shl1_cttz_vec_poison(<2 x i8> %x) {
; CHECK-LABEL: @shl1_cttz_vec_poison(
; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[X]], [[NEG]]
; CHECK-NEXT: ret <2 x i8> [[SHL]]
;
%tz = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
diff --git a/llvm/test/Transforms/InstCombine/shl-bo.ll b/llvm/test/Transforms/InstCombine/shl-bo.ll
index d33d27c912d6b..0322e450a6520 100644
--- a/llvm/test/Transforms/InstCombine/shl-bo.ll
+++ b/llvm/test/Transforms/InstCombine/shl-bo.ll
@@ -7,7 +7,7 @@ define i8 @lshr_add(i8 %a, i8 %y) {
; CHECK-LABEL: @lshr_add(
; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 5
-; CHECK-NEXT: [[R2:%.*]] = add i8 [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = add i8 [[Y:%.*]], [[B1]]
; CHECK-NEXT: [[L:%.*]] = and i8 [[R2]], -32
; CHECK-NEXT: ret i8 [[L]]
;
@@ -22,7 +22,7 @@ define <2 x i8> @lshr_add_commute_splat(<2 x i8> %a, <2 x i8> %y) {
; CHECK-LABEL: @lshr_add_commute_splat(
; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]], <i8 42, i8 42>
; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]], <i8 5, i8 5>
-; CHECK-NEXT: [[R2:%.*]] = add <2 x i8> [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = add <2 x i8> [[Y:%.*]], [[B1]]
; CHECK-NEXT: [[L:%.*]] = and <2 x i8> [[R2]], <i8 -32, i8 -32>
; CHECK-NEXT: ret <2 x i8> [[L]]
;
@@ -67,7 +67,7 @@ define i8 @lshr_and(i8 %a, i8 %y) {
; CHECK-LABEL: @lshr_and(
; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 6
-; CHECK-NEXT: [[R2:%.*]] = and i8 [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = and i8 [[Y:%.*]], [[B1]]
; CHECK-NEXT: ret i8 [[R2]]
;
%x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -81,7 +81,7 @@ define <2 x i8> @lshr_and_commute_splat(<2 x i8> %a, <2 x i8> %y) {
; CHECK-LABEL: @lshr_and_commute_splat(
; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]], <i8 42, i8 42>
; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]], <i8 6, i8 6>
-; CHECK-NEXT: [[R2:%.*]] = and <2 x i8> [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = and <2 x i8> [[Y:%.*]], [[B1]]
; CHECK-NEXT: ret <2 x i8> [[R2]]
;
%x = srem <2 x i8> %a, <i8 42, i8 42> ; thwart complexity-based canonicalization
@@ -96,7 +96,7 @@ define i8 @lshr_or(i8 %a, i8 %y) {
; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 4
; CHECK-NEXT: [[Y_MASKED:%.*]] = and i8 [[Y:%.*]], -16
-; CHECK-NEXT: [[L:%.*]] = or i8 [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = or i8 [[Y_MASKED]], [[B1]]
; CHECK-NEXT: ret i8 [[L]]
;
%x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -111,7 +111,7 @@ define <2 x i8> @lshr_or_commute_splat(<2 x i8> %a, <2 x i8> %y) {
; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]], <i8 42, i8 42>
; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]], <i8 4, i8 4>
; CHECK-NEXT: [[Y_MASKED:%.*]] = and <2 x i8> [[Y:%.*]], <i8 -16, i8 -16>
-; CHECK-NEXT: [[L:%.*]] = or <2 x i8> [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = or <2 x i8> [[Y_MASKED]], [[B1]]
; CHECK-NEXT: ret <2 x i8> [[L]]
;
%x = srem <2 x i8> %a, <i8 42, i8 42> ; thwart complexity-based canonicalization
@@ -126,7 +126,7 @@ define i8 @lshr_xor(i8 %a, i8 %y) {
; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 3
; CHECK-NEXT: [[Y_MASKED:%.*]] = and i8 [[Y:%.*]], -8
-; CHECK-NEXT: [[L:%.*]] = xor i8 [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = xor i8 [[Y_MASKED]], [[B1]]
; CHECK-NEXT: ret i8 [[L]]
;
%x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -141,7 +141,7 @@ define <2 x i8> @lshr_xor_commute_splat(<2 x i8> %a, <2 x i8> %y) {
; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]], <i8 42, i8 42>
; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]], <i8 3, i8 3>
; CHECK-NEXT: [[Y_MASKED:%.*]] = and <2 x i8> [[Y:%.*]], <i8 -8, i8 -8>
-; CHECK-NEXT: [[L:%.*]] = xor <2 x i8> [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = xor <2 x i8> [[Y_MASKED]], [[B1]]
; CHECK-NEXT: ret <2 x i8> [[L]]
;
%x = srem <2 x i8> %a, <i8 42, i8 42> ; thwart complexity-based canonicalization
@@ -347,7 +347,7 @@ define i8 @lshr_and_add_use1(i8 %x, i8 %y) {
; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
; CHECK-NEXT: call void @use(i8 [[R]])
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
;
@@ -364,7 +364,7 @@ define i8 @lshr_and_add_use2(i8 %x, i8 %y) {
; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
;
@@ -380,7 +380,7 @@ define i8 @lshr_and_add_use3(i8 %x, i8 %y) {
; CHECK-LABEL: @lshr_and_add_use3(
; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: call void @use(i8 [[B]])
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
@@ -399,7 +399,7 @@ define i8 @lshr_and_add_use4(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use(i8 [[R]])
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
;
@@ -417,7 +417,7 @@ define i8 @lshr_and_add_use5(i8 %x, i8 %y) {
; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: call void @use(i8 [[B]])
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
@@ -437,7 +437,7 @@ define i8 @lshr_and_add_use6(i8 %x, i8 %y) {
; CHECK-NEXT: call void @use(i8 [[R]])
; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
; CHECK-NEXT: ret i8 [[L]]
;
@@ -508,7 +508,7 @@ define <2 x i32> @lshr_add_and_shl_v2i32_undef(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @lshr_add_and_shl_v2i32_undef(
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 undef, i32 5>
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 127, i32 127>
-; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shl <2 x i32> [[TMP3]], <i32 5, i32 undef>
; CHECK-NEXT: ret <2 x i32> [[TMP4]]
;
@@ -523,7 +523,7 @@ define <2 x i32> @lshr_add_and_shl_v2i32_nonuniform(<2 x i32> %x, <2 x i32> %y)
; CHECK-LABEL: @lshr_add_and_shl_v2i32_nonuniform(
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 5, i32 6>
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 127, i32 255>
-; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shl <2 x i32> [[TMP3]], <i32 5, i32 6>
; CHECK-NEXT: ret <2 x i32> [[TMP4]]
;
@@ -614,8 +614,8 @@ define <8 x i16> @test_FoldShiftByConstant_CreateSHL2(<8 x i16> %in) {
define <16 x i8> @test_FoldShiftByConstant_CreateAnd(<16 x i8> %in0) {
; CHECK-LABEL: @test_FoldShiftByConstant_CreateAnd(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[IN0:%.*]], <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
-; CHECK-NEXT: [[VSHL_N:%.*]] = and <16 x i8> [[TMP1]], <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
+; CHECK-NEXT: [[VSRA_N2:%.*]] = mul <16 x i8> [[IN0:%.*]], <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
+; CHECK-NEXT: [[VSHL_N:%.*]] = and <16 x i8> [[VSRA_N2]], <i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32, i8 -32>
; CHECK-NEXT: ret <16 x i8> [[VSHL_N]]
;
%vsra_n = ashr <16 x i8> %in0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
diff --git a/llvm/test/Transforms/InstCombine/shuffle-binop.ll b/llvm/test/Transforms/InstCombine/shuffle-binop.ll
index 8460f8b2c6cd3..8ab7f315dbf54 100644
--- a/llvm/test/Transforms/InstCombine/shuffle-binop.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle-binop.ll
@@ -82,7 +82,7 @@ define <4 x i8> @splat_binop_splat_x_splat_y(<4 x i8> %x, <4 x i8> %y) {
; CHECK-NEXT: call void @use(<4 x i8> [[XSPLAT]])
; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <4 x i8> [[Y:%.*]], <4 x i8> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: call void @use(<4 x i8> [[YSPLAT]])
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw <4 x i8> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw <4 x i8> [[X]], [[Y]]
; CHECK-NEXT: [[BSPLAT:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x i8> [[BSPLAT]]
;
@@ -101,7 +101,7 @@ define <4 x float> @splat_binop_splat_x_splat_y_fmath_flags(<4 x float> %x, <4 x
; CHECK-NEXT: call void @use(<4 x float> [[XSPLAT]])
; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: call void @use(<4 x float> [[YSPLAT]])
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <4 x float> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <4 x float> [[X]], [[Y]]
; CHECK-NEXT: [[BSPLAT:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x float> [[BSPLAT]]
;
diff --git a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
index 7e762627e5ec0..513fb69ab7463 100644
--- a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
+++ b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
@@ -612,7 +612,7 @@ define zeroext i1 @oneuse_trunc_sext(i32 %arg) {
; CHECK-NEXT: call void @use8(i8 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = sext i8 [[T3]] to i32
; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
+; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[ARG]], [[T4]]
; CHECK-NEXT: call void @use1(i1 [[T5]])
; CHECK-NEXT: [[T6:%.*]] = and i1 [[T2]], [[T5]]
; CHECK-NEXT: ret i1 [[T6]]
@@ -641,7 +641,7 @@ define zeroext i1 @oneuse_trunc_sext_logical(i32 %arg) {
; CHECK-NEXT: call void @use8(i8 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = sext i8 [[T3]] to i32
; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
+; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[ARG]], [[T4]]
; CHECK-NEXT: call void @use1(i1 [[T5]])
; CHECK-NEXT: [[T6:%.*]] = select i1 [[T2]], i1 [[T5]], i1 false
; CHECK-NEXT: ret i1 [[T6]]
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
index e4fb7764ba9e5..403f3bacf34d8 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
@@ -16,7 +16,7 @@ define float @ninf_user_select_inf(i1 %cond, float %x, float %y) {
; CHECK-LABEL: define float @ninf_user_select_inf
; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], float [[Y:%.*]]) {
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND]], float [[X]], float 0x7FF0000000000000
-; CHECK-NEXT: [[NINF_USER:%.*]] = fmul ninf float [[SELECT]], [[Y]]
+; CHECK-NEXT: [[NINF_USER:%.*]] = fmul ninf float [[Y]], [[SELECT]]
; CHECK-NEXT: ret float [[NINF_USER]]
;
%select = select i1 %cond, float %x, float 0x7FF0000000000000
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-and.ll b/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
index 9db6440a49ee7..1f3b46cdc386d 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
@@ -40,7 +40,7 @@ define i1 @n1(i1 %i1, i32 %v2, i32 %v3) {
define i1 @n2(i32 %v0, i32 %v1, i1 %i2) {
; CHECK-LABEL: @n2(
; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I3:%.*]] = and i1 [[I1]], [[I2:%.*]]
+; CHECK-NEXT: [[I3:%.*]] = and i1 [[I2:%.*]], [[I1]]
; CHECK-NEXT: [[I4:%.*]] = xor i1 [[I3]], true
; CHECK-NEXT: ret i1 [[I4]]
;
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-or.ll b/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
index 0b758112f699e..8e6c983b71fe3 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
@@ -40,7 +40,7 @@ define i1 @n1(i1 %i1, i32 %v2, i32 %v3) {
define i1 @n2(i32 %v0, i32 %v1, i1 %i2) {
; CHECK-LABEL: @n2(
; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I3:%.*]] = or i1 [[I1]], [[I2:%.*]]
+; CHECK-NEXT: [[I3:%.*]] = or i1 [[I2:%.*]], [[I1]]
; CHECK-NEXT: [[I4:%.*]] = xor i1 [[I3]], true
; CHECK-NEXT: ret i1 [[I4]]
;
diff --git a/llvm/test/Transforms/InstCombine/smax-icmp.ll b/llvm/test/Transforms/InstCombine/smax-icmp.ll
index 022ec6ad4f346..4c9cbed9d9ebf 100644
--- a/llvm/test/Transforms/InstCombine/smax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smax-icmp.ll
@@ -95,7 +95,7 @@ define i1 @sle_smax2(i32 %x, i32 %y) {
define i1 @sle_smax3(i32 %a, i32 %y) {
; CHECK-LABEL: @sle_smax3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -110,7 +110,7 @@ define i1 @sle_smax3(i32 %a, i32 %y) {
define i1 @sle_smax4(i32 %a, i32 %y) {
; CHECK-LABEL: @sle_smax4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -207,7 +207,7 @@ define i1 @sgt_smax2(i32 %x, i32 %y) {
define i1 @sgt_smax3(i32 %a, i32 %y) {
; CHECK-LABEL: @sgt_smax3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -222,7 +222,7 @@ define i1 @sgt_smax3(i32 %a, i32 %y) {
define i1 @sgt_smax4(i32 %a, i32 %y) {
; CHECK-LABEL: @sgt_smax4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/smin-icmp.ll b/llvm/test/Transforms/InstCombine/smin-icmp.ll
index c97f29f5eff8d..d1283d8afc0a7 100644
--- a/llvm/test/Transforms/InstCombine/smin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smin-icmp.ll
@@ -94,7 +94,7 @@ define i1 @sge_smin2(i32 %x, i32 %y) {
define i1 @sge_smin3(i32 %a, i32 %y) {
; CHECK-LABEL: @sge_smin3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -109,7 +109,7 @@ define i1 @sge_smin3(i32 %a, i32 %y) {
define i1 @sge_smin4(i32 %a, i32 %y) {
; CHECK-LABEL: @sge_smin4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -206,7 +206,7 @@ define i1 @slt_smin2(i32 %x, i32 %y) {
define i1 @slt_smin3(i32 %a, i32 %y) {
; CHECK-LABEL: @slt_smin3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -221,7 +221,7 @@ define i1 @slt_smin3(i32 %a, i32 %y) {
define i1 @slt_smin4(i32 %a, i32 %y) {
; CHECK-LABEL: @slt_smin4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
index 0379f82f4a783..e21ca605fc5af 100644
--- a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
@@ -242,7 +242,7 @@ define i32 @sub_ashr_or_i32_extra_use_ashr(i32 %x, i32 %y, ptr %p) {
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[SHR]], ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[SHR]]
; CHECK-NEXT: ret i32 [[OR]]
;
%sub = sub nsw i32 %y, %x
@@ -268,7 +268,7 @@ define i32 @sub_ashr_or_i32_no_nsw_nuw(i32 %x, i32 %y) {
define i32 @neg_or_extra_use_ashr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_or_extra_use_ashr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 5130883409b28..b773d106b2c98 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -422,7 +422,7 @@ define i64 @nullptrtoint_scalable_x(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
-; CHECK-NEXT: [[PTR_IDX:%.*]] = mul nsw i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[PTR_IDX:%.*]] = mul nsw i64 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i64 [[PTR_IDX]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
index 5ecf4b8da0c49..33c02d77c45b9 100644
--- a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
@@ -81,7 +81,7 @@ define i32 @neg_extra_use_or_lshr_i32(i32 %x, ptr %p) {
define i32 @neg_or_extra_use_lshr_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @neg_or_extra_use_lshr_i32(
; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret i32 [[SHR]]
diff --git a/llvm/test/Transforms/InstCombine/sub-minmax.ll b/llvm/test/Transforms/InstCombine/sub-minmax.ll
index c9ce165c38988..c5af57449bf71 100644
--- a/llvm/test/Transforms/InstCombine/sub-minmax.ll
+++ b/llvm/test/Transforms/InstCombine/sub-minmax.ll
@@ -770,7 +770,7 @@ define i8 @sub_add_umin(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: define {{[^@]+}}@sub_add_umin
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i8 [[S]]
;
%a = add i8 %x, %y
@@ -783,7 +783,7 @@ define i8 @sub_add_umin_commute_umin(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_umin
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i8 [[S]]
;
%a = add i8 %x, %y
@@ -796,7 +796,7 @@ define i8 @sub_add_umin_commute_add(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_add
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i8 [[S]]
;
%a = add i8 %y, %x
@@ -809,7 +809,7 @@ define i8 @sub_add_umin_commute_add_umin(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_add_umin
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i8 [[S]]
;
%a = add i8 %y, %x
@@ -822,7 +822,7 @@ define <2 x i8> @sub_add_umin_vec(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z) {
; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_vec
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]], <2 x i8> [[Z:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[Y]], <2 x i8> [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add <2 x i8> [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add <2 x i8> [[X]], [[TMP1]]
; CHECK-NEXT: ret <2 x i8> [[S]]
;
%a = add <2 x i8> %x, %y
diff --git a/llvm/test/Transforms/InstCombine/sub-not.ll b/llvm/test/Transforms/InstCombine/sub-not.ll
index 89ccf5aa3c8f4..5053319162f0d 100644
--- a/llvm/test/Transforms/InstCombine/sub-not.ll
+++ b/llvm/test/Transforms/InstCombine/sub-not.ll
@@ -6,7 +6,7 @@ declare void @use(i8)
define i8 @sub_not(i8 %x, i8 %y) {
; CHECK-LABEL: @sub_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[R]]
;
%s = sub i8 %x, %y
@@ -30,7 +30,7 @@ define i8 @sub_not_extra_use(i8 %x, i8 %y) {
define <2 x i8> @sub_not_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @sub_not_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%s = sub <2 x i8> %x, %y
@@ -41,7 +41,7 @@ define <2 x i8> @sub_not_vec(<2 x i8> %x, <2 x i8> %y) {
define i8 @dec_sub(i8 %x, i8 %y) {
; CHECK-LABEL: @dec_sub(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[R]]
;
%s = sub i8 %x, %y
@@ -65,7 +65,7 @@ define i8 @dec_sub_extra_use(i8 %x, i8 %y) {
define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @dec_sub_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%s = sub <2 x i8> %x, %y
@@ -76,7 +76,7 @@ define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) {
define i8 @sub_inc(i8 %x, i8 %y) {
; CHECK-LABEL: @sub_inc(
; CHECK-NEXT: [[S_NEG:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[S_NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[S_NEG]]
; CHECK-NEXT: ret i8 [[R]]
;
%s = add i8 %x, 1
@@ -100,7 +100,7 @@ define i8 @sub_inc_extra_use(i8 %x, i8 %y) {
define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @sub_inc_vec(
; CHECK-NEXT: [[S_NEG:%.*]] = xor <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[S_NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[S_NEG]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%s = add <2 x i8> %x, <i8 poison, i8 1>
@@ -111,7 +111,7 @@ define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
define i8 @sub_dec(i8 %x, i8 %y) {
; CHECK-LABEL: @sub_dec(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[R]]
;
%s = add i8 %x, -1
@@ -135,7 +135,7 @@ define i8 @sub_dec_extra_use(i8 %x, i8 %y) {
define <2 x i8> @sub_dec_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @sub_dec_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%s = add <2 x i8> %x, <i8 poison, i8 -1>
diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
index 76a172302999a..60607041ad2f9 100644
--- a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
@@ -262,7 +262,7 @@ define i8 @t12(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[T1:%.*]] = sub i8 0, [[Z:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %y
@@ -296,7 +296,7 @@ define i8 @n14(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
; CHECK-NEXT: [[T2:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %y
@@ -399,7 +399,7 @@ define i8 @n16(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @n16(
; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = mul i8 [[T0]], [[Z:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = mul i8 [[Z:%.*]], [[T0]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i8 [[T2]]
@@ -535,7 +535,7 @@ define i8 @t20(i8 %x, i16 %y) {
; CHECK-LABEL: @t20(
; CHECK-NEXT: [[T0_NEG:%.*]] = shl i16 42, [[Y:%.*]]
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i16 [[T0_NEG]] to i8
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
; CHECK-NEXT: ret i8 [[T2]]
;
%t0 = shl i16 -42, %y
@@ -742,7 +742,7 @@ define i8 @negate_lshr_wrongshift(i8 %x, i8 %y) {
define i8 @negate_sext(i8 %x, i1 %y) {
; CHECK-LABEL: @negate_sext(
; CHECK-NEXT: [[T0_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
; CHECK-NEXT: ret i8 [[T1]]
;
%t0 = sext i1 %y to i8
@@ -752,7 +752,7 @@ define i8 @negate_sext(i8 %x, i1 %y) {
define i8 @negate_zext(i8 %x, i1 %y) {
; CHECK-LABEL: @negate_zext(
; CHECK-NEXT: [[T0_NEG:%.*]] = sext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
; CHECK-NEXT: ret i8 [[T1]]
;
%t0 = zext i1 %y to i8
@@ -1009,7 +1009,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) {
; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set(
; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1
; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
; CHECK-NEXT: ret i8 [[T2]]
;
%t0 = shl i8 %y, 1
@@ -1312,7 +1312,7 @@ define i8 @negate_nabs(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %x
diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
index b2e14ceaca1b0..b19eae4d8f9a4 100644
--- a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
+++ b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
@@ -286,7 +286,7 @@ define i8 @t12(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[T1:%.*]] = sub i8 0, [[Z:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %y
@@ -320,7 +320,7 @@ define i8 @n14(i8 %x, i8 %y, i8 %z) {
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
; CHECK-NEXT: [[T2:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %y
@@ -423,7 +423,7 @@ define i8 @n16(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @n16(
; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = mul i8 [[T0]], [[Z:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = mul i8 [[Z:%.*]], [[T0]]
; CHECK-NEXT: call void @use8(i8 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]]
; CHECK-NEXT: ret i8 [[T2]]
@@ -559,7 +559,7 @@ define i8 @t20(i8 %x, i16 %y) {
; CHECK-LABEL: @t20(
; CHECK-NEXT: [[T0_NEG:%.*]] = shl i16 42, [[Y:%.*]]
; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i16 [[T0_NEG]] to i8
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
; CHECK-NEXT: ret i8 [[T2]]
;
%t0 = shl i16 -42, %y
@@ -766,7 +766,7 @@ define i8 @negate_lshr_wrongshift(i8 %x, i8 %y) {
define i8 @negate_sext(i8 %x, i1 %y) {
; CHECK-LABEL: @negate_sext(
; CHECK-NEXT: [[T0_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
; CHECK-NEXT: ret i8 [[T1]]
;
%t0 = sext i1 %y to i8
@@ -776,7 +776,7 @@ define i8 @negate_sext(i8 %x, i1 %y) {
define i8 @negate_zext(i8 %x, i1 %y) {
; CHECK-LABEL: @negate_zext(
; CHECK-NEXT: [[T0_NEG:%.*]] = sext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
; CHECK-NEXT: ret i8 [[T1]]
;
%t0 = zext i1 %y to i8
@@ -1033,7 +1033,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) {
; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set(
; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1
; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
; CHECK-NEXT: ret i8 [[T2]]
;
%t0 = shl i8 %y, 1
@@ -1071,7 +1071,7 @@ define i8 @negation_of_increment_via_or_common_bits_set(i8 %x, i8 %y) {
define i8 @negation_of_increment_via_or_disjoint(i8 %x, i8 %y) {
; CHECK-LABEL: @negation_of_increment_via_or_disjoint(
; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
; CHECK-NEXT: ret i8 [[T2]]
;
%t1 = or disjoint i8 %y, 1
@@ -1347,7 +1347,7 @@ define i8 @negate_nabs(i8 %x, i8 %y) {
; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i8 [[T3]]
;
%t0 = sub i8 0, %x
diff --git a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
index 461c9b0fb1e0c..acbc29db871e8 100644
--- a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
@@ -58,7 +58,7 @@ define i64 @sext_non_bool_xor_sub(i64 %a, i8 %b) {
; CHECK-LABEL: define i64 @sext_non_bool_xor_sub(
; CHECK-SAME: i64 [[A:%.*]], i8 [[B:%.*]]) {
; CHECK-NEXT: [[C:%.*]] = sext i8 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
; CHECK-NEXT: [[R:%.*]] = sub i64 [[D]], [[C]]
; CHECK-NEXT: ret i64 [[R]]
;
@@ -72,7 +72,7 @@ define i64 @sext_non_bool_xor_sub_1(i64 %a, i8 %b) {
; CHECK-LABEL: define i64 @sext_non_bool_xor_sub_1(
; CHECK-SAME: i64 [[A:%.*]], i8 [[B:%.*]]) {
; CHECK-NEXT: [[C:%.*]] = sext i8 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
; CHECK-NEXT: [[R:%.*]] = sub i64 [[D]], [[C]]
; CHECK-NEXT: ret i64 [[R]]
;
@@ -135,9 +135,9 @@ define i64 @xor_multi_uses(i64 %a, i1 %b, i64 %x) {
; CHECK-LABEL: define i64 @xor_multi_uses(
; CHECK-SAME: i64 [[A:%.*]], i1 [[B:%.*]], i64 [[X:%.*]]) {
; CHECK-NEXT: [[C:%.*]] = sext i1 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
; CHECK-NEXT: [[E:%.*]] = sub i64 [[D]], [[C]]
-; CHECK-NEXT: [[F:%.*]] = mul i64 [[D]], [[X]]
+; CHECK-NEXT: [[F:%.*]] = mul i64 [[X]], [[D]]
; CHECK-NEXT: [[R:%.*]] = add i64 [[F]], [[E]]
; CHECK-NEXT: ret i64 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
index 32ed4a787e926..4f6520609777a 100644
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -230,7 +230,7 @@ define i32 @test5(i32 %A, i32 %B, i32 %C) {
define i32 @test6(i32 %A, i32 %B) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and i32 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i32 [[D]]
;
%C = and i32 %A, %B
@@ -241,7 +241,7 @@ define i32 @test6(i32 %A, i32 %B) {
define i32 @test6commuted(i32 %A, i32 %B) {
; CHECK-LABEL: @test6commuted(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and i32 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i32 [[D]]
;
%C = and i32 %B, %A
@@ -686,7 +686,7 @@ define <2 x i32> @test27commutedvecmixed(<2 x i32> %x, <2 x i32> %y) {
define i32 @test28(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test28(
; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Z:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%neg = sub i32 0, %z
@@ -698,7 +698,7 @@ define i32 @test28(i32 %x, i32 %y, i32 %z) {
define i32 @test28commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test28commuted(
; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Z:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%neg = sub i32 0, %z
@@ -893,7 +893,7 @@ define i32 @test45commuted(i32 %x, i32 %y) {
define i32 @test46(i32 %x, i32 %y) {
; CHECK-LABEL: @test46(
; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[Y:%.*]], [[X_NOT]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%or = or i32 %x, %y
@@ -904,7 +904,7 @@ define i32 @test46(i32 %x, i32 %y) {
define i32 @test46commuted(i32 %x, i32 %y) {
; CHECK-LABEL: @test46commuted(
; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[Y:%.*]], [[X_NOT]]
; CHECK-NEXT: ret i32 [[SUB]]
;
%or = or i32 %y, %x
@@ -1350,7 +1350,7 @@ define i32 @test71(i32 %A, i32 %B) {
define <2 x i32> @test72(<2 x i32> %A, <2 x i32> %B) {
; CHECK-LABEL: @test72(
; CHECK-NEXT: [[B_NOT:%.*]] = xor <2 x i32> [[B:%.*]], <i32 -1, i32 -1>
-; CHECK-NEXT: [[D:%.*]] = and <2 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and <2 x i32> [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret <2 x i32> [[D]]
;
%C = or <2 x i32> %A, %B
@@ -1442,7 +1442,7 @@ define i8 @sub_add_sub_reassoc(i8 %w, i8 %x, i8 %y, i8 %z) {
define <2 x i8> @sub_add_sub_reassoc_commute(<2 x i8> %w, <2 x i8> %x, <2 x i8> %y, <2 x i8> %z) {
; CHECK-LABEL: @sub_add_sub_reassoc_commute(
; CHECK-NEXT: [[D:%.*]] = sdiv <2 x i8> [[Y:%.*]], <i8 42, i8 -42>
-; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[D]], [[W:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[W:%.*]], [[D]]
; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[S2:%.*]] = sub <2 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: ret <2 x i8> [[S2]]
@@ -1460,7 +1460,7 @@ define i8 @sub_add_sub_reassoc_twice(i8 %v, i8 %w, i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @sub_add_sub_reassoc_twice(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[W:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X:%.*]], [[V:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: [[S3:%.*]] = sub i8 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i8 [[S3]]
;
@@ -2026,7 +2026,7 @@ define i16 @urem_zext_noundef(i8 noundef %x, i8 %y) {
define i8 @mul_sub_common_factor_commute1(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_sub_common_factor_commute1(
; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[X1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[X1]]
; CHECK-NEXT: ret i8 [[A]]
;
%m = mul nsw i8 %x, %y
@@ -2052,7 +2052,7 @@ define <2 x i8> @mul_sub_common_factor_commute2(<2 x i8> %x, <2 x i8> %y) {
define i8 @mul_sub_common_factor_commute3(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_sub_common_factor_commute3(
; CHECK-NEXT: [[M1:%.*]] = sub i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[M1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[M1]]
; CHECK-NEXT: ret i8 [[A]]
;
%m = mul nuw i8 %x, %y
@@ -2063,7 +2063,7 @@ define i8 @mul_sub_common_factor_commute3(i8 %x, i8 %y) {
define i8 @mul_sub_common_factor_commute4(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_sub_common_factor_commute4(
; CHECK-NEXT: [[M1:%.*]] = sub i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[M1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[M1]]
; CHECK-NEXT: ret i8 [[A]]
;
%m = mul nsw i8 %y, %x
diff --git a/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll b/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
index e3103906911af..4593730b8809f 100644
--- a/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
@@ -5,7 +5,7 @@ define i16 @narrow_sext_and(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_sext_and(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = sext i16 %x16 to i32
@@ -18,7 +18,7 @@ define i16 @narrow_zext_and(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_zext_and(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = zext i16 %x16 to i32
@@ -31,7 +31,7 @@ define i16 @narrow_sext_or(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_sext_or(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = sext i16 %x16 to i32
@@ -44,7 +44,7 @@ define i16 @narrow_zext_or(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_zext_or(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = zext i16 %x16 to i32
@@ -57,7 +57,7 @@ define i16 @narrow_sext_xor(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_sext_xor(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = xor i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = sext i16 %x16 to i32
@@ -70,7 +70,7 @@ define i16 @narrow_zext_xor(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_zext_xor(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = xor i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = zext i16 %x16 to i32
@@ -83,7 +83,7 @@ define i16 @narrow_sext_add(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_sext_add(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = add i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = sext i16 %x16 to i32
@@ -96,7 +96,7 @@ define i16 @narrow_zext_add(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_zext_add(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = add i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = zext i16 %x16 to i32
@@ -135,7 +135,7 @@ define i16 @narrow_sext_mul(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_sext_mul(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = mul i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = sext i16 %x16 to i32
@@ -148,7 +148,7 @@ define i16 @narrow_zext_mul(i16 %x16, i32 %y32) {
; CHECK-LABEL: define i16 @narrow_zext_mul(
; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = mul i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul i16 [[X16]], [[TMP1]]
; CHECK-NEXT: ret i16 [[R]]
;
%x32 = zext i16 %x16 to i32
@@ -165,7 +165,7 @@ define <2 x i16> @narrow_sext_and_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -180,7 +180,7 @@ define <2 x i16> @narrow_zext_and_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -195,7 +195,7 @@ define <2 x i16> @narrow_sext_or_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -210,7 +210,7 @@ define <2 x i16> @narrow_zext_or_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -225,7 +225,7 @@ define <2 x i16> @narrow_sext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -240,7 +240,7 @@ define <2 x i16> @narrow_zext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -255,7 +255,7 @@ define <2 x i16> @narrow_sext_add_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -270,7 +270,7 @@ define <2 x i16> @narrow_zext_add_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -315,7 +315,7 @@ define <2 x i16> @narrow_sext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
@@ -330,7 +330,7 @@ define <2 x i16> @narrow_zext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) {
; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], <i32 7, i32 -17>
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[X16]], [[TMP1]]
; CHECK-NEXT: ret <2 x i16> [[R]]
;
%y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17>
diff --git a/llvm/test/Transforms/InstCombine/uaddo.ll b/llvm/test/Transforms/InstCombine/uaddo.ll
index c638c0adef055..9b56dce8b4585 100644
--- a/llvm/test/Transforms/InstCombine/uaddo.ll
+++ b/llvm/test/Transforms/InstCombine/uaddo.ll
@@ -5,7 +5,7 @@ define i32 @uaddo_commute1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute1(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -20,7 +20,7 @@ define <2 x i32> @uaddo_commute2(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) {
; CHECK-LABEL: @uaddo_commute2(
; CHECK-NEXT: [[NOTY:%.*]] = xor <2 x i32> [[Y:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT: [[A:%.*]] = add <2 x i32> [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i32> [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i32> [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C]], <2 x i32> [[Z:%.*]], <2 x i32> [[A]]
; CHECK-NEXT: ret <2 x i32> [[R]]
;
@@ -35,7 +35,7 @@ define i32 @uaddo_commute3(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute3(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -50,7 +50,7 @@ define i32 @uaddo_commute4(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute4(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -65,7 +65,7 @@ define i32 @uaddo_commute5(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute5(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -80,7 +80,7 @@ define i32 @uaddo_commute6(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute6(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -95,7 +95,7 @@ define i32 @uaddo_commute7(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute7(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -110,7 +110,7 @@ define i32 @uaddo_commute8(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_commute8(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -125,7 +125,7 @@ define i32 @uaddo_wrong_pred1(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_wrong_pred1(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -140,7 +140,7 @@ define i32 @uaddo_wrong_pred2(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @uaddo_wrong_pred2(
; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1
; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[C_NOT:%.*]] = icmp ugt i32 [[NOTY]], [[X]]
+; CHECK-NEXT: [[C_NOT:%.*]] = icmp ult i32 [[X]], [[NOTY]]
; CHECK-NEXT: [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 [[Z:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/umax-icmp.ll b/llvm/test/Transforms/InstCombine/umax-icmp.ll
index 9946f3c390f0f..b4eea30bfc6af 100644
--- a/llvm/test/Transforms/InstCombine/umax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umax-icmp.ll
@@ -95,7 +95,7 @@ define i1 @ule_umax2(i32 %x, i32 %y) {
define i1 @ule_umax3(i32 %a, i32 %y) {
; CHECK-LABEL: @ule_umax3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -110,7 +110,7 @@ define i1 @ule_umax3(i32 %a, i32 %y) {
define i1 @ule_umax4(i32 %a, i32 %y) {
; CHECK-LABEL: @ule_umax4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -207,7 +207,7 @@ define i1 @ugt_umax2(i32 %x, i32 %y) {
define i1 @ugt_umax3(i32 %a, i32 %y) {
; CHECK-LABEL: @ugt_umax3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -222,7 +222,7 @@ define i1 @ugt_umax3(i32 %a, i32 %y) {
define i1 @ugt_umax4(i32 %a, i32 %y) {
; CHECK-LABEL: @ugt_umax4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/umin-icmp.ll b/llvm/test/Transforms/InstCombine/umin-icmp.ll
index da901c6c5e484..cb23b2f00d292 100644
--- a/llvm/test/Transforms/InstCombine/umin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umin-icmp.ll
@@ -95,7 +95,7 @@ define i1 @uge_umin2(i32 %x, i32 %y) {
define i1 @uge_umin3(i32 %a, i32 %y) {
; CHECK-LABEL: @uge_umin3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -110,7 +110,7 @@ define i1 @uge_umin3(i32 %a, i32 %y) {
define i1 @uge_umin4(i32 %a, i32 %y) {
; CHECK-LABEL: @uge_umin4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -207,7 +207,7 @@ define i1 @ult_umin2(i32 %x, i32 %y) {
define i1 @ult_umin3(i32 %a, i32 %y) {
; CHECK-LABEL: @ult_umin3(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -222,7 +222,7 @@ define i1 @ult_umin3(i32 %a, i32 %y) {
define i1 @ult_umin4(i32 %a, i32 %y) {
; CHECK-LABEL: @ult_umin4(
; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll b/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll
index 8ab1f130f1cda..ec015e8ad2aaa 100644
--- a/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll
+++ b/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll
@@ -360,7 +360,7 @@ define i1 @fcmp_ord_and_fneg_ueq(half %x, half %y) {
; CHECK-LABEL: @fcmp_ord_and_fneg_ueq(
; CHECK-NEXT: [[FNEG_X:%.*]] = fneg half [[X:%.*]]
; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000
-; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_X]], [[Y:%.*]]
+; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_X]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -389,7 +389,7 @@ define i1 @fcmp_ord_fneg_and_fneg_ueq(half %x, half %y) {
; CHECK-LABEL: @fcmp_ord_fneg_and_fneg_ueq(
; CHECK-NEXT: [[FNEG_X:%.*]] = fneg half [[X:%.*]]
; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000
-; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_X]], [[Y:%.*]]
+; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_X]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -405,7 +405,7 @@ define i1 @fcmp_ord_and_fneg_fabs_ueq(half %x, half %y) {
; CHECK-NEXT: [[FABS_X:%.*]] = call half @llvm.fabs.f16(half [[X:%.*]])
; CHECK-NEXT: [[FNEG_FABS_X:%.*]] = fneg half [[FABS_X]]
; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000
-; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_FABS_X]], [[Y:%.*]]
+; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_FABS_X]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -451,7 +451,7 @@ define i1 @fcmp_ord_and_copysign_ueq_commute(half %x, half %y, half %z) {
; CHECK-LABEL: @fcmp_ord_and_copysign_ueq_commute(
; CHECK-NEXT: [[COPYSIGN_X_Y:%.*]] = call half @llvm.copysign.f16(half [[X:%.*]], half [[Z:%.*]])
; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000
-; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[COPYSIGN_X_Y]], [[Y:%.*]]
+; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[COPYSIGN_X_Y]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]]
; CHECK-NEXT: ret i1 [[AND]]
;
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll
index c5be9a7b769ce..5a0d283ff8bb6 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll
@@ -108,7 +108,7 @@ define i1 @t5_commutative(i8 %x) {
define i1 @t6_no_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t6_no_extrause(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll
index 1b41f609705ef..17b32670ae9d7 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll
@@ -15,7 +15,7 @@ define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -28,7 +28,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[T0]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = xor <2 x i8> %y, <i8 -1, i8 -1>
@@ -61,7 +61,7 @@ define i1 @t2_commutative(i8 %y) {
define i1 @t3_no_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t3_no_extrause(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -75,7 +75,7 @@ define i1 @n4_wrong_pred0(i8 %x, i8 %y) {
; CHECK-LABEL: @n4_wrong_pred0(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -88,7 +88,7 @@ define i1 @n5_wrong_pred1(i8 %x, i8 %y) {
; CHECK-LABEL: @n5_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -101,7 +101,7 @@ define i1 @n6_wrong_pred2(i8 %x, i8 %y) {
; CHECK-LABEL: @n6_wrong_pred2(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -114,7 +114,7 @@ define i1 @n7_wrong_pred3(i8 %x, i8 %y) {
; CHECK-LABEL: @n7_wrong_pred3(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -127,7 +127,7 @@ define i1 @n8_wrong_pred4(i8 %x, i8 %y) {
; CHECK-LABEL: @n8_wrong_pred4(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -140,7 +140,7 @@ define i1 @n9_wrong_pred5(i8 %x, i8 %y) {
; CHECK-LABEL: @n9_wrong_pred5(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -153,7 +153,7 @@ define i1 @n10_wrong_pred6(i8 %x, i8 %y) {
; CHECK-LABEL: @n10_wrong_pred6(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -166,7 +166,7 @@ define i1 @n11_wrong_pred7(i8 %x, i8 %y) {
; CHECK-LABEL: @n11_wrong_pred7(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll
index e7120a7d01cfa..677ef47456c01 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll
@@ -11,7 +11,7 @@
define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -22,7 +22,7 @@ define i1 @t0_basic(i8 %x, i8 %y) {
define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = add <2 x i8> %x, %y
@@ -35,7 +35,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
define i1 @t2_symmetry(i8 %x, i8 %y) {
; CHECK-LABEL: @t2_symmetry(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -49,7 +49,7 @@ define i1 @t3_commutative(i8 %x) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y = call i8 @gen8()
@@ -61,7 +61,7 @@ define i1 @t3_commutative(i8 %x) {
define i1 @t4_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -73,7 +73,7 @@ define i1 @t5_commutative(i8 %x) {
; CHECK-LABEL: @t5_commutative(
; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y = call i8 @gen8()
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll
index 23b89b7c1e65f..bfdcb8343f2d9 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll
@@ -75,7 +75,7 @@ define i1 @t4_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -104,7 +104,7 @@ define i1 @t5_commutative(i8 %x) {
define i1 @t6_no_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t6_no_extrause(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll
index 646bd635807a7..457a0e594b630 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll
@@ -15,7 +15,7 @@ define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -28,7 +28,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[T0]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = xor <2 x i8> %y, <i8 -1, i8 -1>
@@ -61,7 +61,7 @@ define i1 @t2_commutative(i8 %y) {
define i1 @t3_no_extrause(i8 %x, i8 %y) {
; CHECK-LABEL: @t3_no_extrause(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -75,7 +75,7 @@ define i1 @n4_wrong_pred0(i8 %x, i8 %y) {
; CHECK-LABEL: @n4_wrong_pred0(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -88,7 +88,7 @@ define i1 @n5_wrong_pred1(i8 %x, i8 %y) {
; CHECK-LABEL: @n5_wrong_pred1(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -101,7 +101,7 @@ define i1 @n6_wrong_pred2(i8 %x, i8 %y) {
; CHECK-LABEL: @n6_wrong_pred2(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -114,7 +114,7 @@ define i1 @n7_wrong_pred3(i8 %x, i8 %y) {
; CHECK-LABEL: @n7_wrong_pred3(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -127,7 +127,7 @@ define i1 @n8_wrong_pred4(i8 %x, i8 %y) {
; CHECK-LABEL: @n8_wrong_pred4(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -140,7 +140,7 @@ define i1 @n9_wrong_pred5(i8 %x, i8 %y) {
; CHECK-LABEL: @n9_wrong_pred5(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -153,7 +153,7 @@ define i1 @n10_wrong_pred6(i8 %x, i8 %y) {
; CHECK-LABEL: @n10_wrong_pred6(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
@@ -166,7 +166,7 @@ define i1 @n11_wrong_pred7(i8 %x, i8 %y) {
; CHECK-LABEL: @n11_wrong_pred7(
; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[T0]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = xor i8 %y, -1
diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll
index 3533c6a54a22a..94966a1eba328 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll
@@ -11,7 +11,7 @@
define i1 @t0_basic(i8 %x, i8 %y) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -22,7 +22,7 @@ define i1 @t0_basic(i8 %x, i8 %y) {
define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @t1_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = add <2 x i8> %x, %y
@@ -35,7 +35,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
define i1 @t2_symmetry(i8 %x, i8 %y) {
; CHECK-LABEL: @t2_symmetry(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -49,7 +49,7 @@ define i1 @t3_commutative(i8 %x) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y = call i8 @gen8()
@@ -61,7 +61,7 @@ define i1 @t3_commutative(i8 %x) {
define i1 @t4_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = add i8 %x, %y
@@ -73,7 +73,7 @@ define i1 @t5_commutative(i8 %x) {
; CHECK-LABEL: @t5_commutative(
; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y = call i8 @gen8()
diff --git a/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll
index 500d61ac1b111..e844b321830a1 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll
@@ -30,7 +30,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
define i1 @t2_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t2_commutative(
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = sub i8 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll
index 5b273026dafe7..5f37b1d962345 100644
--- a/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll
+++ b/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll
@@ -30,7 +30,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) {
define i1 @t2_commutative(i8 %x, i8 %y) {
; CHECK-LABEL: @t2_commutative(
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = sub i8 %x, %y
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index fd55a236e0d75..e4b7d425cd3ad 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -1163,7 +1163,7 @@ define i4 @common_binop_demand_via_extelt_op0_mismatch_elt1(<2 x i4> %x, <2 x i4
define <2 x i8> @common_binop_demand_via_splat_mask_poison(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison(
; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> <i32 0, i32 poison>
-; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]]
; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[VV]], <2 x i8> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]]
; CHECK-NEXT: ret <2 x i8> [[RES]]
@@ -1179,7 +1179,7 @@ define <2 x i8> @common_binop_demand_via_splat_mask_poison(<2 x i8> %x, <2 x i8>
define <2 x i8> @common_binop_demand_via_splat_mask_poison_2(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison_2(
; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> <i32 poison, i32 0>
-; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]]
; CHECK-NEXT: [[M:%.*]] = add <2 x i8> [[X]], [[Y]]
; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[M]], <2 x i8> [[Y]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]]
@@ -1196,7 +1196,7 @@ define <2 x i8> @common_binop_demand_via_splat_mask_poison_2(<2 x i8> %x, <2 x i
define <2 x i8> @common_binop_demand_via_splat_mask_poison_3(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison_3(
; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> <i32 poison, i32 0>
-; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]]
; CHECK-NEXT: [[M:%.*]] = add <2 x i8> [[X]], [[Y]]
; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[M]], <2 x i8> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]]
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
index ef085d3e7b50b..f325d6d37409e 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
@@ -1594,7 +1594,7 @@ define <2 x float> @splat_assoc_fmul(<2 x float> %x, <2 x float> %y) {
define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1608,7 +1608,7 @@ define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
define <3 x i8> @splat_assoc_mul_undef_elt1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul_undef_elt1(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1624,7 +1624,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> %
; CHECK-LABEL: @splat_assoc_mul_undef_elt2(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 poison, i32 2, i32 2>
-; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1637,7 +1637,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> %
define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index1(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1653,7 +1653,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index2(<3 x i8> %x, <3 x i8>
; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index2(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 poison>
-; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1670,7 +1670,7 @@ define <3 x i8> @splat_assoc_or(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_or(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 1, i32 1, i32 1>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
-; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = or <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1733,7 +1733,7 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) {
define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) {
; CHECK-LABEL: @splat_assoc_xor(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <5 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[Y:%.*]], [[SPLATX]]
; CHECK-NEXT: [[R:%.*]] = xor <5 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42>
; CHECK-NEXT: ret <5 x i32> [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
index 919e30f672e44..0b54b7d60c00e 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
@@ -1604,7 +1604,7 @@ define <2 x float> @splat_assoc_fmul(<2 x float> %x, <2 x float> %y) {
define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1618,7 +1618,7 @@ define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
define <3 x i8> @splat_assoc_mul_undef_elt1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul_undef_elt1(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1634,7 +1634,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> %
; CHECK-LABEL: @splat_assoc_mul_undef_elt2(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 poison, i32 2, i32 2>
-; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1647,7 +1647,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> %
define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index1(
-; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]]
; CHECK-NEXT: ret <3 x i8> [[R]]
@@ -1663,7 +1663,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index2(<3 x i8> %x, <3 x i8>
; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index2(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 poison>
-; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1680,7 +1680,7 @@ define <3 x i8> @splat_assoc_or(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) {
; CHECK-LABEL: @splat_assoc_or(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> <i32 1, i32 1, i32 1>
; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> <i32 2, i32 2, i32 2>
-; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[SPLATZ]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[Y:%.*]], [[SPLATZ]]
; CHECK-NEXT: [[R:%.*]] = or <3 x i8> [[A]], [[SPLATX]]
; CHECK-NEXT: ret <3 x i8> [[R]]
;
@@ -1743,7 +1743,7 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) {
define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) {
; CHECK-LABEL: @splat_assoc_xor(
; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <5 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[Y:%.*]], [[SPLATX]]
; CHECK-NEXT: [[R:%.*]] = xor <5 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42>
; CHECK-NEXT: ret <5 x i32> [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/vector-reverse.ll b/llvm/test/Transforms/InstCombine/vector-reverse.ll
index a1a6ee949a138..c9c68d2241b34 100644
--- a/llvm/test/Transforms/InstCombine/vector-reverse.ll
+++ b/llvm/test/Transforms/InstCombine/vector-reverse.ll
@@ -250,7 +250,7 @@ define <vscale x 4 x i1> @icmp_reverse_splat_RHS(<vscale x 4 x i32> %a, i32 %b)
; CHECK-LABEL: @icmp_reverse_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <vscale x 4 x i32> [[B_SPLAT]], [[A:%.*]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt <vscale x 4 x i32> [[A:%.*]], [[B_SPLAT]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/vector-xor.ll b/llvm/test/Transforms/InstCombine/vector-xor.ll
index 5c96f1a691ed0..13894ef85b5da 100644
--- a/llvm/test/Transforms/InstCombine/vector-xor.ll
+++ b/llvm/test/Transforms/InstCombine/vector-xor.ll
@@ -6,7 +6,7 @@
define <4 x i32> @test_v4i32_xor_repeated_and_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: @test_v4i32_xor_repeated_and_0(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <4 x i32> [[TMP2]]
;
%1 = and <4 x i32> %a, %b
@@ -18,7 +18,7 @@ define <4 x i32> @test_v4i32_xor_repeated_and_0(<4 x i32> %a, <4 x i32> %b, <4 x
define <4 x i32> @test_v4i32_xor_repeated_and_1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK-LABEL: @test_v4i32_xor_repeated_and_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret <4 x i32> [[TMP2]]
;
%1 = and <4 x i32> %a, %b
@@ -69,7 +69,7 @@ define <4 x i32> @test_v4i32_xor_bswap_const_poison(<4 x i32> %a0) {
define <4 x i32> @test_v4i32_demorgan_and(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_demorgan_and(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i32> [[Y_NOT]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i32> [[X:%.*]], [[Y_NOT]]
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
%1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
@@ -83,7 +83,7 @@ define <4 x i32> @test_v4i32_demorgan_and(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @test_v4i32_demorgan_or(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @test_v4i32_demorgan_or(
; CHECK-NEXT: [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[TMP1:%.*]] = and <4 x i32> [[Y_NOT]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and <4 x i32> [[X:%.*]], [[Y_NOT]]
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
%1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
diff --git a/llvm/test/Transforms/InstCombine/widenable-conditions.ll b/llvm/test/Transforms/InstCombine/widenable-conditions.ll
index 0e377c9fa4862..46a93580e9c78 100644
--- a/llvm/test/Transforms/InstCombine/widenable-conditions.ll
+++ b/llvm/test/Transforms/InstCombine/widenable-conditions.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define i1 @test1(i1 %a, i1 %b) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A:%.*]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -20,7 +20,7 @@ define i1 @test1(i1 %a, i1 %b) {
define i1 @test1_logical(i1 %a, i1 %b) {
; CHECK-LABEL: @test1_logical(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A:%.*]], i1 false
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -34,7 +34,7 @@ define i1 @test1_logical(i1 %a, i1 %b) {
define i1 @test1b(i1 %a, i1 %b) {
; CHECK-LABEL: @test1b(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]]
; CHECK-NEXT: call void @use(i1 [[LHS]])
; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A:%.*]]
; CHECK-NEXT: ret i1 [[AND]]
@@ -49,7 +49,7 @@ define i1 @test1b(i1 %a, i1 %b) {
define i1 @test1b_logical(i1 %a, i1 %b) {
; CHECK-LABEL: @test1b_logical(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]]
; CHECK-NEXT: call void @use(i1 [[LHS]])
; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A:%.*]], i1 false
; CHECK-NEXT: ret i1 [[AND]]
@@ -68,7 +68,7 @@ define i1 @test1c(i1 %a, i1 %b) {
; CHECK-NEXT: call void @use(i1 [[B:%.*]])
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: call void @use(i1 [[WC]])
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -87,7 +87,7 @@ define i1 @test1c_logical(i1 %a, i1 %b) {
; CHECK-NEXT: call void @use(i1 [[B:%.*]])
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: call void @use(i1 [[WC]])
-; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B]]
+; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A]], i1 false
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -132,7 +132,7 @@ define i1 @test3(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[LHS:%.*]] = and i1 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[RHS:%.*]] = and i1 [[WC]], [[C:%.*]]
+; CHECK-NEXT: [[RHS:%.*]] = and i1 [[C:%.*]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[RHS]]
; CHECK-NEXT: ret i1 [[AND]]
;
@@ -147,7 +147,7 @@ define i1 @test3_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @test3_logical(
; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[LHS:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false
-; CHECK-NEXT: [[RHS:%.*]] = and i1 [[WC]], [[C:%.*]]
+; CHECK-NEXT: [[RHS:%.*]] = and i1 [[C:%.*]], [[WC]]
; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[RHS]], i1 false
; CHECK-NEXT: ret i1 [[AND]]
;
diff --git a/llvm/test/Transforms/InstCombine/xor.ll b/llvm/test/Transforms/InstCombine/xor.ll
index 9a59db40ef8b1..ac97e816a4b69 100644
--- a/llvm/test/Transforms/InstCombine/xor.ll
+++ b/llvm/test/Transforms/InstCombine/xor.ll
@@ -72,8 +72,8 @@ define i32 @test7(i32 %A, i32 %B) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[A1:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT: [[B1:%.*]] = and i32 [[B:%.*]], 128
-; CHECK-NEXT: [[C11:%.*]] = or disjoint i32 [[A1]], [[B1]]
-; CHECK-NEXT: ret i32 [[C11]]
+; CHECK-NEXT: [[C1:%.*]] = or disjoint i32 [[A1]], [[B1]]
+; CHECK-NEXT: ret i32 [[C1]]
;
%A1 = and i32 %A, 7
%B1 = and i32 %B, 128
@@ -122,8 +122,8 @@ define <2 x i1> @test9vec(<2 x i8> %a) {
define i8 @test10(i8 %A) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[B:%.*]] = and i8 [[A:%.*]], 3
-; CHECK-NEXT: [[C1:%.*]] = or disjoint i8 [[B]], 4
-; CHECK-NEXT: ret i8 [[C1]]
+; CHECK-NEXT: [[C:%.*]] = or disjoint i8 [[B]], 4
+; CHECK-NEXT: ret i8 [[C]]
;
%B = and i8 %A, 3
%C = xor i8 %B, 4
@@ -253,7 +253,7 @@ define i1 @test24(i32 %c, i32 %d) {
define i32 @test25(i32 %g, i32 %h) {
; CHECK-LABEL: @test25(
-; CHECK-NEXT: [[T4:%.*]] = and i32 [[H:%.*]], [[G:%.*]]
+; CHECK-NEXT: [[T4:%.*]] = and i32 [[G:%.*]], [[H:%.*]]
; CHECK-NEXT: ret i32 [[T4]]
;
%h2 = xor i32 %h, -1
@@ -487,7 +487,7 @@ define i32 @or_xor_extra_use(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: @or_xor_extra_use(
; CHECK-NEXT: [[O:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[B]], [[O]]
; CHECK-NEXT: ret i32 [[R]]
;
%o = or i32 %a, %b
@@ -572,7 +572,7 @@ define i32 @and_xor_extra_use(i32 %a, i32 %b, ptr %p) {
; CHECK-LABEL: @and_xor_extra_use(
; CHECK-NEXT: [[O:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[B]], [[O]]
; CHECK-NEXT: ret i32 [[R]]
;
%o = and i32 %a, %b
@@ -773,7 +773,7 @@ define <4 x i32> @test46(<4 x i32> %x) {
define i32 @test47(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @test47(
; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[NOTX]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[NOTX]])
; CHECK-NEXT: [[UMIN:%.*]] = xor i32 [[UMAX]], -1
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[UMAX]], [[Z:%.*]]
; CHECK-NEXT: [[RES:%.*]] = mul i32 [[ADD]], [[UMIN]]
@@ -988,7 +988,7 @@ define i4 @or_or_xor_use2(i4 %x, i4 %y, i4 %z, ptr %p) {
define i32 @not_is_canonical(i32 %x, i32 %y) {
; CHECK-LABEL: @not_is_canonical(
; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[Y:%.*]], [[SUB]]
; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[ADD]], 2
; CHECK-NEXT: ret i32 [[MUL]]
;
@@ -1175,7 +1175,7 @@ define <2 x i32> @xor_andn_commute1(<2 x i32> %a, <2 x i32> %b) {
define i33 @xor_andn_commute2(i33 %a, i33 %pb) {
; CHECK-LABEL: @xor_andn_commute2(
; CHECK-NEXT: [[B:%.*]] = udiv i33 42, [[PB:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = or i33 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = or i33 [[A:%.*]], [[B]]
; CHECK-NEXT: ret i33 [[Z]]
;
%b = udiv i33 42, %pb ; thwart complexity-based canonicalization
@@ -1252,7 +1252,7 @@ define i8 @xor_orn_commute1(i8 %pa, i8 %b) {
define i32 @xor_orn_commute2(i32 %a, i32 %pb,ptr %s) {
; CHECK-LABEL: @xor_orn_commute2(
; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], [[B]]
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[Z]]
;
@@ -1268,7 +1268,7 @@ define i32 @xor_orn_commute2_1use(i32 %a, i32 %pb,ptr %s) {
; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]]
; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1
; CHECK-NEXT: store i32 [[NOTA]], ptr [[S:%.*]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A]], [[B]]
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: ret i32 [[Z]]
;
@@ -1321,7 +1321,7 @@ define i32 @xor_orn_2use(i32 %a, i32 %b, ptr %s1, ptr %s2) {
; CHECK-LABEL: @xor_orn_2use(
; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1
; CHECK-NEXT: store i32 [[NOTA]], ptr [[S1:%.*]], align 4
-; CHECK-NEXT: [[L:%.*]] = or i32 [[NOTA]], [[B:%.*]]
+; CHECK-NEXT: [[L:%.*]] = or i32 [[B:%.*]], [[NOTA]]
; CHECK-NEXT: store i32 [[L]], ptr [[S2:%.*]], align 4
; CHECK-NEXT: [[Z:%.*]] = xor i32 [[L]], [[A]]
; CHECK-NEXT: ret i32 [[Z]]
@@ -1367,7 +1367,7 @@ define <2 x i8> @cttz_pow2(<2 x i8> %x, <2 x i8> %y) {
define i32 @ctlz_pow2_or_zero(i32 %x) {
; CHECK-LABEL: @ctlz_pow2_or_zero(
; CHECK-NEXT: [[N:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[A:%.*]] = and i32 [[N]], [[X]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], [[N]]
; CHECK-NEXT: [[Z:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[A]], i1 false)
; CHECK-NEXT: [[R:%.*]] = xor i32 [[Z]], 31
; CHECK-NEXT: ret i32 [[R]]
@@ -1384,7 +1384,7 @@ define i32 @ctlz_pow2_or_zero(i32 %x) {
define i32 @ctlz_pow2_wrong_const(i32 %x) {
; CHECK-LABEL: @ctlz_pow2_wrong_const(
; CHECK-NEXT: [[N:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[A:%.*]] = and i32 [[N]], [[X]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], [[N]]
; CHECK-NEXT: [[Z:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[A]], i1 true)
; CHECK-NEXT: [[R:%.*]] = xor i32 [[Z]], 30
; CHECK-NEXT: ret i32 [[R]]
diff --git a/llvm/test/Transforms/InstCombine/xor2.ll b/llvm/test/Transforms/InstCombine/xor2.ll
index 7d12a00a8bd51..0b4fca76ed0a7 100644
--- a/llvm/test/Transforms/InstCombine/xor2.ll
+++ b/llvm/test/Transforms/InstCombine/xor2.ll
@@ -36,8 +36,8 @@ define i1 @test1(i32 %A) {
define i32 @test2(i32 %t1) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32
-; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8
-; CHECK-NEXT: ret i32 [[OV1101]]
+; CHECK-NEXT: [[OV110:%.*]] = or disjoint i32 [[OVM]], 8
+; CHECK-NEXT: ret i32 [[OV110]]
;
%ovm = and i32 %t1, 32
%ov3 = add i32 %ovm, 145
@@ -48,8 +48,8 @@ define i32 @test2(i32 %t1) {
define i32 @test3(i32 %t1) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32
-; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8
-; CHECK-NEXT: ret i32 [[OV1101]]
+; CHECK-NEXT: [[OV110:%.*]] = or disjoint i32 [[OVM]], 8
+; CHECK-NEXT: ret i32 [[OV110]]
;
%ovm = or i32 %t1, 145
%ov31 = and i32 %ovm, 177
@@ -99,7 +99,7 @@ define i32 @test6(i32 %x) {
define i32 @test7(i32 %a, i32 %b) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%or = or i32 %a, %b
@@ -112,7 +112,7 @@ define i32 @test7(i32 %a, i32 %b) {
define i32 @test8(i32 %a, i32 %b) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = or i32 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i32 [[XOR]]
;
%neg = xor i32 %a, -1
@@ -233,7 +233,7 @@ define i32 @test11e(i32 %A, i32 %B, i32 %C) {
; CHECK-LABEL: @test11e(
; CHECK-NEXT: [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[FORCE]]
; CHECK-NEXT: [[XOR2:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
; CHECK-NEXT: ret i32 [[AND]]
@@ -250,7 +250,7 @@ define i32 @test11f(i32 %A, i32 %B, i32 %C) {
; CHECK-LABEL: @test11f(
; CHECK-NEXT: [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]]
; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[FORCE]]
; CHECK-NEXT: [[XOR2:%.*]] = xor i32 [[TMP1]], -1
; CHECK-NEXT: [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
; CHECK-NEXT: ret i32 [[AND]]
@@ -324,7 +324,7 @@ define i32 @test13commuted(i32 %a, i32 %b) {
define i32 @xor_or_xor_common_op_commute1(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -339,7 +339,7 @@ define i32 @xor_or_xor_common_op_commute1(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute2(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute2(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -354,7 +354,7 @@ define i32 @xor_or_xor_common_op_commute2(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute3(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute3(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -369,7 +369,7 @@ define i32 @xor_or_xor_common_op_commute3(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute4(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute4(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -384,7 +384,7 @@ define i32 @xor_or_xor_common_op_commute4(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute5(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute5(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -399,7 +399,7 @@ define i32 @xor_or_xor_common_op_commute5(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute6(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute6(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -414,7 +414,7 @@ define i32 @xor_or_xor_common_op_commute6(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute7(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute7(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
@@ -429,7 +429,7 @@ define i32 @xor_or_xor_common_op_commute7(i32 %a, i32 %b, i32 %c) {
define i32 @xor_or_xor_common_op_commute8(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: @xor_or_xor_common_op_commute8(
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]]
; CHECK-NEXT: ret i32 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll
index 12739b5686a0a..c9da18d3d88bd 100644
--- a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll
+++ b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll
@@ -268,7 +268,7 @@ define <2 x i64> @sext_sub_const_vec_poison_elt(<2 x i1> %A) {
define i8 @sext_sub(i8 %x, i1 %y) {
; CHECK-LABEL: @sext_sub(
; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[SEXT_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[SEXT_NEG]]
; CHECK-NEXT: ret i8 [[SUB]]
;
%sext = sext i1 %y to i8
@@ -281,7 +281,7 @@ define i8 @sext_sub(i8 %x, i1 %y) {
define <2 x i8> @sext_sub_vec(<2 x i8> %x, <2 x i1> %y) {
; CHECK-LABEL: @sext_sub_vec(
; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[SEXT_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[SEXT_NEG]]
; CHECK-NEXT: ret <2 x i8> [[SUB]]
;
%sext = sext <2 x i1> %y to <2 x i8>
@@ -294,7 +294,7 @@ define <2 x i8> @sext_sub_vec(<2 x i8> %x, <2 x i1> %y) {
define <2 x i8> @sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) {
; CHECK-LABEL: @sext_sub_vec_nsw(
; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[SEXT_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[SEXT_NEG]]
; CHECK-NEXT: ret <2 x i8> [[SUB]]
;
%sext = sext <2 x i1> %y to <2 x i8>
@@ -307,7 +307,7 @@ define <2 x i8> @sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) {
define i8 @sext_sub_nuw(i8 %x, i1 %y) {
; CHECK-LABEL: @sext_sub_nuw(
; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[SEXT_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[SEXT_NEG]]
; CHECK-NEXT: ret i8 [[SUB]]
;
%sext = sext i1 %y to i8
@@ -318,7 +318,7 @@ define i8 @sext_sub_nuw(i8 %x, i1 %y) {
define i32 @sextbool_add(i1 %c, i32 %x) {
; CHECK-LABEL: @sextbool_add(
; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32
-; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]]
+; CHECK-NEXT: [[S:%.*]] = add i32 [[X:%.*]], [[B]]
; CHECK-NEXT: ret i32 [[S]]
;
%b = sext i1 %c to i32
@@ -347,7 +347,7 @@ define i32 @sextbool_add_uses(i1 %c, i32 %x) {
; CHECK-LABEL: @sextbool_add_uses(
; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[B]])
-; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]]
+; CHECK-NEXT: [[S:%.*]] = add i32 [[X:%.*]], [[B]]
; CHECK-NEXT: ret i32 [[S]]
;
%b = sext i1 %c to i32
@@ -359,7 +359,7 @@ define i32 @sextbool_add_uses(i1 %c, i32 %x) {
define <4 x i32> @sextbool_add_vector(<4 x i1> %c, <4 x i32> %x) {
; CHECK-LABEL: @sextbool_add_vector(
; CHECK-NEXT: [[B:%.*]] = sext <4 x i1> [[C:%.*]] to <4 x i32>
-; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[B]], [[X:%.*]]
+; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[X:%.*]], [[B]]
; CHECK-NEXT: ret <4 x i32> [[S]]
;
%b = sext <4 x i1> %c to <4 x i32>
@@ -394,7 +394,7 @@ define i32 @zextbool_sub_uses(i1 %c, i32 %x) {
define <4 x i32> @zextbool_sub_vector(<4 x i1> %c, <4 x i32> %x) {
; CHECK-LABEL: @zextbool_sub_vector(
; CHECK-NEXT: [[B_NEG:%.*]] = sext <4 x i1> [[C:%.*]] to <4 x i32>
-; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[B_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[X:%.*]], [[B_NEG]]
; CHECK-NEXT: ret <4 x i32> [[S]]
;
%b = zext <4 x i1> %c to <4 x i32>
diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
index a4b74aa8cc7dc..acf547b55722f 100644
--- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -181,7 +181,7 @@ define i8 @PR49475_infloop(i32 %t0, i16 %insert, i64 %e, i8 %i162) {
; CHECK-NEXT: [[CONV18:%.*]] = ashr exact i64 [[SEXT]], 32
; CHECK-NEXT: [[CMP:%.*]] = icmp sge i64 [[XOR]], [[CONV18]]
; CHECK-NEXT: [[TRUNC44:%.*]] = zext i1 [[CMP]] to i8
-; CHECK-NEXT: [[INC:%.*]] = add i8 [[TRUNC44]], [[I162]]
+; CHECK-NEXT: [[INC:%.*]] = add i8 [[I162]], [[TRUNC44]]
; CHECK-NEXT: [[TOBOOL23_NOT:%.*]] = xor i1 [[CMP]], true
; CHECK-NEXT: call void @llvm.assume(i1 [[TOBOOL23_NOT]])
; CHECK-NEXT: ret i8 [[INC]]
diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll
index 88cd9c70af40d..7b2cf131c396a 100644
--- a/llvm/test/Transforms/InstCombine/zext.ll
+++ b/llvm/test/Transforms/InstCombine/zext.ll
@@ -546,7 +546,7 @@ define i64 @and_trunc_extra_use1(i64 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use1(
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[T]])
-; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
@@ -581,7 +581,7 @@ define i64 @and_trunc_extra_use1_commute(i64 %x, i32 %p) {
define i64 @and_trunc_extra_use2(i64 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use2(
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
-; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
; CHECK-NEXT: call void @use32(i32 [[A]])
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
@@ -635,7 +635,7 @@ define i64 @and_trunc_extra_use1_wider_src(i65 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use1_wider_src(
; CHECK-NEXT: [[T:%.*]] = trunc i65 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[T]])
-; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
+; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
@@ -782,7 +782,7 @@ define i64 @evaluate_zexted_const_expr(i1 %c) {
define i16 @zext_nneg_flag_drop(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_nneg_flag_drop(
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[X:%.*]] to i16
-; CHECK-NEXT: [[OR1:%.*]] = or i16 [[EXT]], [[Y:%.*]]
+; CHECK-NEXT: [[OR1:%.*]] = or i16 [[Y:%.*]], [[EXT]]
; CHECK-NEXT: [[OR2:%.*]] = or i16 [[OR1]], 128
; CHECK-NEXT: ret i16 [[OR2]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
index ed8d8e15282d5..6953d6c48694c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
@@ -110,7 +110,7 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur
; CHECK-NEXT: store i8 [[CONV12]], ptr [[ARRAYIDX8]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
index 6f62f2f2096f1..4768167a9c69f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
@@ -7,12 +7,12 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
@@ -31,7 +31,7 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -86,12 +86,12 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
@@ -109,7 +109,7 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -162,12 +162,12 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -189,7 +189,7 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
index ce1cfda438170..28a80cf177c1c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
@@ -7,12 +7,12 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -28,7 +28,7 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -74,12 +74,12 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -96,7 +96,7 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -141,12 +141,12 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
@@ -162,7 +162,7 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -211,12 +211,12 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0
@@ -233,7 +233,7 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -286,12 +286,12 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3
; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
@@ -321,7 +321,7 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
index 965c71c008aa1..34fb5bb640471 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
@@ -16,7 +16,7 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 1853e551806bc..5dfc077129d48 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -1464,7 +1464,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 2
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[TMP7]], i64 6
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
index e3bba1338e1df..81121019efe76 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll
@@ -16,49 +16,49 @@ define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0{
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP31:%.*]] = shl i64 [[TMP30]], 4
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[INDEX]], -1
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 1, [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP12:%.*]] = shl i64 [[TMP11]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = sub i64 0, [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = sub i64 1, [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[TMP15]], i64 [[TMP14]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP10]], align 8
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP16]], align 8
-; CHECK-NEXT: [[TMP17:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP18:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 3
-; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP21]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[TMP19]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP25:%.*]] = shl i64 [[TMP24]], 3
-; CHECK-NEXT: [[TMP26:%.*]] = sub i64 0, [[TMP25]]
-; CHECK-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP25]]
-; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds double, ptr [[TMP19]], i64 [[TMP26]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds double, ptr [[TMP28]], i64 [[TMP27]]
-; CHECK-NEXT: store <vscale x 8 x double> [[TMP17]], ptr [[TMP23]], align 8
-; CHECK-NEXT: store <vscale x 8 x double> [[TMP18]], ptr [[TMP29]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP31]]
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[INDEX]], -1
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[N]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = shl i64 [[TMP9]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = sub i64 1, [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[TMP13]], 3
+; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i64 [[TMP16]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP12]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP18]], align 8
+; CHECK-NEXT: [[TMP19:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP20:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP23:%.*]] = shl i64 [[TMP22]], 3
+; CHECK-NEXT: [[TMP24:%.*]] = sub i64 1, [[TMP23]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds double, ptr [[TMP21]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 3
+; CHECK-NEXT: [[TMP28:%.*]] = sub i64 0, [[TMP27]]
+; CHECK-NEXT: [[TMP29:%.*]] = sub i64 1, [[TMP27]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds double, ptr [[TMP21]], i64 [[TMP28]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds double, ptr [[TMP30]], i64 [[TMP29]]
+; CHECK-NEXT: store <vscale x 8 x double> [[TMP19]], ptr [[TMP25]], align 8
+; CHECK-NEXT: store <vscale x 8 x double> [[TMP20]], ptr [[TMP31]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -112,7 +112,7 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -125,42 +125,42 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP34:%.*]] = shl i64 [[TMP33]], 4
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[INDEX]], -1
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[N]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[TMP10]], 3
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 1, [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = shl i64 [[TMP14]], 3
-; CHECK-NEXT: [[TMP16:%.*]] = sub i64 0, [[TMP15]]
-; CHECK-NEXT: [[TMP17:%.*]] = sub i64 1, [[TMP15]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i64, ptr [[TMP18]], i64 [[TMP17]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP13]], align 8
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP24:%.*]] = shl i64 [[TMP23]], 3
-; CHECK-NEXT: [[TMP25:%.*]] = sub i64 1, [[TMP24]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP22]], i64 [[TMP25]]
-; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP28:%.*]] = shl i64 [[TMP27]], 3
-; CHECK-NEXT: [[TMP29:%.*]] = sub i64 0, [[TMP28]]
-; CHECK-NEXT: [[TMP30:%.*]] = sub i64 1, [[TMP28]]
-; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i64, ptr [[TMP22]], i64 [[TMP29]]
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i64, ptr [[TMP31]], i64 [[TMP30]]
-; CHECK-NEXT: store <vscale x 8 x i64> [[TMP20]], ptr [[TMP26]], align 8
-; CHECK-NEXT: store <vscale x 8 x i64> [[TMP21]], ptr [[TMP32]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP34]]
+; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[INDEX]], -1
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[N]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP12]], 3
+; CHECK-NEXT: [[TMP14:%.*]] = sub i64 1, [[TMP13]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 3
+; CHECK-NEXT: [[TMP18:%.*]] = sub i64 0, [[TMP17]]
+; CHECK-NEXT: [[TMP19:%.*]] = sub i64 1, [[TMP17]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i64 [[TMP19]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP21]], align 8
+; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP26:%.*]] = shl i64 [[TMP25]], 3
+; CHECK-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP26]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 3
+; CHECK-NEXT: [[TMP31:%.*]] = sub i64 0, [[TMP30]]
+; CHECK-NEXT: [[TMP32:%.*]] = sub i64 1, [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i64, ptr [[TMP33]], i64 [[TMP32]]
+; CHECK-NEXT: store <vscale x 8 x i64> [[TMP22]], ptr [[TMP28]], align 8
+; CHECK-NEXT: store <vscale x 8 x i64> [[TMP23]], ptr [[TMP34]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 61105e51cb946..1a267a76aa9b6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -19,12 +19,12 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 3
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
@@ -35,38 +35,38 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 5
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP8]], align 4
-; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <vscale x 8 x i32>, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP8]], i64 [[TMP7]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP2]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC2]])
-; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 1
-; CHECK-NEXT: [[TMP15:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[TMP13]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[TMP18]], 2
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 [[TMP19]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP17]], align 4
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP16]], ptr [[TMP20]], align 4
-; CHECK-NEXT: [[TMP21:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP22:%.*]] = add nsw <vscale x 4 x i32> [[TMP14]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP25:%.*]] = shl nuw nsw i64 [[TMP24]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP25]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP23]], align 4
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP22]], ptr [[TMP26]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC3]])
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 0
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 4 x i32> [[TMP9]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP14:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i64 [[TMP16]], 2
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP17]]
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP15]], align 4
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP18]], align 4
+; CHECK-NEXT: [[TMP19:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP20:%.*]] = add nsw <vscale x 4 x i32> [[TMP12]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i64 [[TMP22]], 2
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP23]]
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP19]], ptr [[TMP21]], align 4
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP20]], ptr [[TMP24]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[C]], [[ENTRY:%.*]] ]
@@ -76,13 +76,13 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: [[PTR_014:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i8, ptr [[PTR_014]], i64 4
-; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[PTR_014]], align 4
+; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[PTR_014]], align 4
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[PTR_014]], i64 8
-; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP28]], 1
+; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], 1
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_013]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP29]], 1
+; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP27]], 1
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_013]]
; CHECK-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX3]], align 4
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
@@ -132,12 +132,12 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8
-; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]]
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 2
@@ -148,26 +148,26 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i64 [[TMP10]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
-; CHECK-NEXT: [[TMP12:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD5]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP14]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP15]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP8]], align 4
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP16]], align 4
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX5:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX5]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 [[TMP8]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP11:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD8]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 2
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[NEXT_GEP6]], i64 [[TMP13]]
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[NEXT_GEP6]], align 4
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP14]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -178,8 +178,8 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[D_09:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[S_010]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP18]], 1
+; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[S_010]], align 4
+; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP16]], 1
; CHECK-NEXT: store i32 [[MUL]], ptr [[D_09]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[D_09]], i64 4
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[S_010]], i64 4
@@ -239,31 +239,31 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = shl <vscale x 2 x i64> [[TMP9]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 2, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x ptr> [[TMP10]], i64 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP13]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[TMP11]], align 8
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 2 x ptr> [[TMP10]], i64 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP12]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[NEXT_GEP]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[TMP12]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[A]], [[ENTRY]] ]
; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP15]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -279,7 +279,7 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[VAR5]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
index c22613509be4f..57807604b37a8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll
@@ -32,7 +32,7 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N)
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = xor i64 [[INDEX]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], [[N]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[N]], [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[COND:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -24
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -56
@@ -47,17 +47,17 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP7]], i64 -56
; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i1> [[TMP5]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP8]], i32 8, <4 x i1> [[REVERSE3]], <4 x double> poison)
-; CHECK-NEXT: [[REVERSE4:%.*]] = shufflevector <4 x i1> [[TMP6]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE4]], <4 x double> poison)
+; CHECK-NEXT: [[REVERSE5:%.*]] = shufflevector <4 x i1> [[TMP6]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE5]], <4 x double> poison)
; CHECK-NEXT: [[TMP10:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT: [[TMP11:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD6]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP10]], ptr [[TMP8]], i32 8, <4 x i1> [[REVERSE3]])
-; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP11]], ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE4]])
+; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP11]], ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE5]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll
index 45b84a0b5e856..fec5921720fed 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll
@@ -38,7 +38,7 @@ define void @arm_abs_q7(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ]
@@ -118,22 +118,22 @@ define void @arm_abs_q15(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP4]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX7]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <8 x i16> [[WIDE_LOAD]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
-; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i16> zeroinitializer, [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP8:%.*]] = select <8 x i1> [[TMP6]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP5]], <8 x i16> [[WIDE_LOAD]], <8 x i16> [[TMP8]]
-; CHECK-NEXT: store <8 x i16> [[TMP9]], ptr [[NEXT_GEP7]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <8 x i16> [[WIDE_LOAD]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
+; CHECK-NEXT: [[TMP5:%.*]] = sub <8 x i16> zeroinitializer, [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP4]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP3]], <8 x i16> [[WIDE_LOAD]], <8 x i16> [[TMP6]]
+; CHECK-NEXT: store <8 x i16> [[TMP7]], ptr [[NEXT_GEP8]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ]
@@ -145,12 +145,12 @@ define void @arm_abs_q15(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32
; CHECK-NEXT: [[BLKCNT_022:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[PDST_ADDR_021:%.*]] = phi ptr [ [[INCDEC_PTR13:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_023]], i32 2
-; CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[PSRC_ADDR_023]], align 2
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP11]], 0
-; CHECK-NEXT: [[CMP5:%.*]] = icmp eq i16 [[TMP11]], -32768
-; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[PSRC_ADDR_023]], align 2
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP9]], 0
+; CHECK-NEXT: [[CMP5:%.*]] = icmp eq i16 [[TMP9]], -32768
+; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[TMP9]]
; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP5]], i16 32767, i16 [[SUB]]
-; CHECK-NEXT: [[COND11:%.*]] = select i1 [[CMP1]], i16 [[TMP11]], i16 [[COND]]
+; CHECK-NEXT: [[COND11:%.*]] = select i1 [[CMP1]], i16 [[TMP9]], i16 [[COND]]
; CHECK-NEXT: [[INCDEC_PTR13]] = getelementptr inbounds i8, ptr [[PDST_ADDR_021]], i32 2
; CHECK-NEXT: store i16 [[COND11]], ptr [[PDST_ADDR_021]], align 2
; CHECK-NEXT: [[DEC]] = add i32 [[BLKCNT_022]], -1
@@ -213,22 +213,22 @@ define void @arm_abs_q31(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP4]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX7]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
-; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> zeroinitializer, [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[WIDE_LOAD]], <4 x i32> [[TMP8]]
-; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[NEXT_GEP7]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT: [[TMP5:%.*]] = sub nsw <4 x i32> zeroinitializer, [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP4]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[WIDE_LOAD]], <4 x i32> [[TMP6]]
+; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[NEXT_GEP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ]
@@ -240,12 +240,12 @@ define void @arm_abs_q31(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32
; CHECK-NEXT: [[BLKCNT_016:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[PDST_ADDR_015:%.*]] = phi ptr [ [[INCDEC_PTR7:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_017]], i32 4
-; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[PSRC_ADDR_017]], align 4
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP11]], -2147483648
-; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[PSRC_ADDR_017]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP9]], -2147483648
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[TMP9]]
; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP2]], i32 2147483647, i32 [[SUB]]
-; CHECK-NEXT: [[COND6:%.*]] = select i1 [[CMP1]], i32 [[TMP11]], i32 [[COND]]
+; CHECK-NEXT: [[COND6:%.*]] = select i1 [[CMP1]], i32 [[TMP9]], i32 [[COND]]
; CHECK-NEXT: [[INCDEC_PTR7]] = getelementptr inbounds i8, ptr [[PDST_ADDR_015]], i32 4
; CHECK-NEXT: store i32 [[COND6]], ptr [[PDST_ADDR_015]], align 4
; CHECK-NEXT: [[DEC]] = add i32 [[BLKCNT_016]], -1
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index 18caa9cc16f35..a7cb5c61ca550 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -67,7 +67,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -132,7 +132,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -197,7 +197,7 @@ define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -582,7 +582,7 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -658,7 +658,7 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -738,7 +738,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
@@ -1197,7 +1197,7 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
index 6953834335669..d904c50f3bf9c 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
@@ -30,35 +30,35 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PA]], i32 [[TMP2]]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[PB]], i32 [[TMP3]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PA]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX5:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[PB]], i32 [[OFFSET_IDX5]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[NEXT_GEP5]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD6]], zeroinitializer
-; CHECK-NEXT: [[DOTNOT8:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP7:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD6]]
-; CHECK-NEXT: [[TMP10:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP9]])
-; CHECK-NEXT: [[TMP11:%.*]] = fdiv fast <4 x float> [[TMP10]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP11]], [[VEC_PHI]]
-; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[DOTNOT8]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP12]]
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[NEXT_GEP6]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD7]], zeroinitializer
+; CHECK-NEXT: [[DOTNOT9:%.*]] = select <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP5:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD7]])
+; CHECK-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD7]]
+; CHECK-NEXT: [[TMP8:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP7]])
+; CHECK-NEXT: [[TMP9:%.*]] = fdiv fast <4 x float> [[TMP8]], [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[DOTNOT9]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP14:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PA]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PB]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[IND_END3]], [[MIDDLE_BLOCK]] ], [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[WHILE_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[PA_ADDR_020:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -66,20 +66,20 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[BLOCKSIZE_ADDR_018:%.*]] = phi i32 [ [[DEC:%.*]], [[IF_END]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ACCUM_017:%.*]] = phi float [ [[ACCUM_1:%.*]], [[IF_END]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PA_ADDR_020]], i32 4
-; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[PA_ADDR_020]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[PA_ADDR_020]], align 4
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[PB_ADDR_019]], i32 4
-; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[PB_ADDR_019]], align 4
-; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast une float [[TMP15]], 0.000000e+00
-; CHECK-NEXT: [[CMP3:%.*]] = fcmp fast une float [[TMP16]], 0.000000e+00
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[PB_ADDR_019]], align 4
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast une float [[TMP13]], 0.000000e+00
+; CHECK-NEXT: [[CMP3:%.*]] = fcmp fast une float [[TMP14]], 0.000000e+00
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP2]], i1 true, i1 [[CMP3]]
; CHECK-NEXT: br i1 [[OR_COND]], label [[IF_THEN:%.*]], label [[IF_END]]
; CHECK: if.then:
-; CHECK-NEXT: [[TMP17:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP15]])
-; CHECK-NEXT: [[TMP18:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP16]])
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP18]], [[TMP17]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[TMP15]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = tail call fast float @llvm.fabs.f32(float [[SUB]])
-; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float [[TMP19]], [[ADD]]
+; CHECK-NEXT: [[TMP15:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP13]])
+; CHECK-NEXT: [[TMP16:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = tail call fast float @llvm.fabs.f32(float [[SUB]])
+; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float [[TMP17]], [[ADD]]
; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float [[DIV]], [[ACCUM_017]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
@@ -88,7 +88,7 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: while.end:
-; CHECK-NEXT: [[ACCUM_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ACCUM_1]], [[IF_END]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ACCUM_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ACCUM_1]], [[IF_END]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[ACCUM_0_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll b/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
index 2269b774d9f31..3432773b4e1b3 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
@@ -12,16 +12,16 @@ define hidden void @pointer_phi_v4i32_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -53,24 +53,24 @@ define hidden void @pointer_phi_v4i32_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 3
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 8
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -109,22 +109,22 @@ define hidden void @pointer_phi_v4i32_add3(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[ENTRY:%.*]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 0, i32 12, i32 24, i32 36>
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 48
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 12
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -160,16 +160,16 @@ define hidden void @pointer_phi_v8i16_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr [[NEXT_GEP5]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP4]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP2]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -203,17 +203,17 @@ define hidden void @pointer_phi_v8i16_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i16>, ptr [[NEXT_GEP]], align 2
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i16> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr [[NEXT_GEP5]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP4]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_011:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
@@ -352,23 +352,23 @@ define hidden void @pointer_phi_v16i8_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
-; CHECK-NEXT: [[TMP2:%.*]] = add <16 x i8> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[NEXT_GEP4]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[NEXT_GEP4]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_010:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_09:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_08:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[A_ADDR_010]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[A_ADDR_010]], align 1
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_010]], i32 2
-; CHECK-NEXT: [[CONV1:%.*]] = add i8 [[TMP4]], [[TMP0]]
+; CHECK-NEXT: [[CONV1:%.*]] = add i8 [[TMP3]], [[TMP0]]
; CHECK-NEXT: store i8 [[CONV1]], ptr [[B_ADDR_08]], align 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_08]], i32 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_09]], 1
@@ -445,16 +445,16 @@ define hidden void @pointer_phi_v4f32_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -486,24 +486,24 @@ define hidden void @pointer_phi_v4f32_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 3
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x float> [[WIDE_VEC]], <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <4 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 8
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP2]], [[Y]]
; CHECK-NEXT: store float [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -542,22 +542,22 @@ define hidden void @pointer_phi_v4f32_add3(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[ENTRY:%.*]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 0, i32 12, i32 24, i32 36>
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <4 x float> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 48
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 12
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP3]], [[Y]]
; CHECK-NEXT: store float [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -592,16 +592,16 @@ define hidden void @pointer_phi_v4half_add1(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -633,24 +633,24 @@ define hidden void @pointer_phi_v4half_add2(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x half>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x half> [[WIDE_VEC]], <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 4
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[Y]]
; CHECK-NEXT: store half [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 2
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -687,24 +687,24 @@ define hidden void @pointer_phi_v4half_add3(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[INDEX]], 6
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i32 [[INDEX]], 6
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <24 x half>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <24 x half> [[WIDE_VEC]], <24 x half> poison, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 6
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[Y]]
; CHECK-NEXT: store half [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 2
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -747,28 +747,28 @@ define hidden void @pointer_phi_v4i32_uf2(ptr noalias nocapture readonly %A, ptr
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 0, i32 24, i32 48, i32 72>
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 96, i32 120, i32 144, i32 168>
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP1]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
-; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER5]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
-; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER5]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 192
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9992
-; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9992
+; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_08:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 9992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_08]], i32 24
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_06]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_06]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
@@ -814,36 +814,36 @@ define hidden void @pointer_phi_v4i32_uf4(ptr noalias nocapture readonly %A, ptr
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 96, i32 120, i32 144, i32 168>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 192, i32 216, i32 240, i32 264>
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32> <i32 288, i32 312, i32 336, i32 360>
-; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP4]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER7:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP1]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER8:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP2]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP3]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
-; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER7]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER8]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER9]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
-; CHECK-NEXT: store <4 x i32> [[TMP5]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER7]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER8]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER9]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
+; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP9]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP10]], align 4
-; CHECK-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP11]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 384
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9984
-; CHECK-NEXT: br i1 [[TMP12]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9984
+; CHECK-NEXT: br i1 [[TMP11]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_08:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 9984, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_08]], i32 24
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_06]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_06]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
@@ -875,8 +875,8 @@ define hidden void @mult_ptr_iv(ptr noalias nocapture readonly %x, ptr noalias n
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[Z:%.*]], i32 3000
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[X:%.*]], i32 3000
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[Z]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[X]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[Z]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[X]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
index f58d864e1e147..7db5bccd896b2 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
@@ -16,41 +16,41 @@ define arm_aapcs_vfpcc i32 @minmaxval4(ptr nocapture readonly %x, ptr nocapture
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI1]])
-; CHECK-NEXT: [[TMP3]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI1]])
+; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ 2147483647, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ 2147483647, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ -2147483648, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[MAX_0_LCSSA:%.*]] = phi i32 [ -2147483648, [[ENTRY:%.*]] ], [ [[TMP8:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[MIN_0_LCSSA:%.*]] = phi i32 [ 2147483647, [[ENTRY]] ], [ [[TMP9:%.*]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MAX_0_LCSSA:%.*]] = phi i32 [ -2147483648, [[ENTRY:%.*]] ], [ [[COND:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MIN_0_LCSSA:%.*]] = phi i32 [ 2147483647, [[ENTRY]] ], [ [[COND9:%.*]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i32 [[MIN_0_LCSSA]], ptr [[MINP:%.*]], align 4
; CHECK-NEXT: ret i32 [[MAX_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_029:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[MIN_028:%.*]] = phi i32 [ [[TMP9]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[MAX_027:%.*]] = phi i32 [ [[TMP8]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[MIN_028:%.*]] = phi i32 [ [[COND9]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[MAX_027:%.*]] = phi i32 [ [[COND]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_029]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TMP8]] = call i32 @llvm.smax.i32(i32 [[TMP7]], i32 [[MAX_027]])
-; CHECK-NEXT: [[TMP9]] = call i32 @llvm.smin.i32(i32 [[TMP7]], i32 [[MIN_028]])
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[COND]] = call i32 @llvm.smax.i32(i32 [[TMP6]], i32 [[MAX_027]])
+; CHECK-NEXT: [[COND9]] = call i32 @llvm.smin.i32(i32 [[TMP6]], i32 [[MIN_028]])
; CHECK-NEXT: [[INC]] = add nuw i32 [[I_029]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
%cmp26.not = icmp eq i32 %N, 0
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
index 8783326b1ef1a..9f9db3ad85991 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
@@ -15,8 +15,8 @@ define i32 @inv_load_conditional(ptr %a, i64 %n, ptr %b, i32 %k) {
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
index 6575c5a288f21..a00d24f16a3c6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
@@ -18,8 +18,8 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
@@ -132,8 +132,8 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
@@ -245,15 +245,15 @@ define void @variant_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND03:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
-; CHECK-NEXT: [[BOUND14:%.*]] = icmp ugt ptr [[SCEVGEP]], [[C]]
+; CHECK-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
-; CHECK-NEXT: [[BOUND06:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[A]]
-; CHECK-NEXT: [[BOUND17:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[C]]
+; CHECK-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[A]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[C]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
index fe6d9b3ec690e..47636b2c66d29 100644
--- a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
+++ b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
@@ -21,7 +21,7 @@ define void @inv_store_last_lane(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP1]], i64 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -82,7 +82,7 @@ define float @ret_last_lane(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x float> [[TMP1]], i64 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll
index bd658c31768a8..bf1905bf33487 100644
--- a/llvm/test/Transforms/LoopVectorize/float-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll
@@ -66,7 +66,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL1-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -124,7 +124,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -175,7 +175,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC1_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -226,7 +226,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -313,7 +313,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL1-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -371,7 +371,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -424,7 +424,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC1_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -475,7 +475,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -528,7 +528,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644
; VEC4_INTERL1-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
@@ -557,7 +557,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -576,7 +576,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640
; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], <float 0.000000e+00, float 5.000000e-01, float 1.000000e+00, float 1.500000e+00>
@@ -608,7 +608,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -627,14 +627,14 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC1_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483646
; VEC1_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1
; VEC1_INTERL2-NEXT: [[DOTCAST2:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[DOTCAST2]], 5.000000e-01
-; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP3]], [[INIT]]
+; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[INIT]], [[TMP3]]
; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = fadd fast float [[OFFSET_IDX]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP2]]
@@ -658,7 +658,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -677,7 +677,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC2_INTERL1_PRED_STORE-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483646
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[INIT]], i64 0
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x float> [[DOTSPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDUCTION:%.*]] = fadd fast <2 x float> [[DOTSPLAT]], <float 0.000000e+00, float 5.000000e-01>
@@ -702,7 +702,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -763,7 +763,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000
; VEC4_INTERL1-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL1-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]]
-; VEC4_INTERL1-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]]
+; VEC4_INTERL1-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]]
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
@@ -817,7 +817,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL1-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -840,7 +840,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000
; VEC4_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]]
-; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]]
+; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]]
; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
@@ -904,7 +904,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC4_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -927,7 +927,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000
; VEC1_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]]
-; VEC1_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]]
+; VEC1_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]]
; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC1_INTERL2: vector.body:
; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -936,7 +936,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast float [[DOTCAST5]], -5.000000e-01
; VEC1_INTERL2-NEXT: [[DOTCAST6:%.*]] = sitofp i64 [[INDEX]] to float
; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fmul fast float [[TMP0]], [[DOTCAST6]]
-; VEC1_INTERL2-NEXT: [[OFFSET_IDX7:%.*]] = fadd fast float [[TMP6]], [[INIT]]
+; VEC1_INTERL2-NEXT: [[OFFSET_IDX7:%.*]] = fadd fast float [[INIT]], [[TMP6]]
; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fadd fast float [[OFFSET_IDX7]], [[TMP0]]
; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP4]]
@@ -982,7 +982,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC1_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -1005,7 +1005,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]]
-; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[INIT]], i64 0
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x float> [[DOTSPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
@@ -1054,7 +1054,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca
; VEC2_INTERL1_PRED_STORE-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -1141,7 +1141,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -1189,7 +1189,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -1239,7 +1239,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -1280,7 +1280,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) {
; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
index b55c4214ec599..d19ca172a8c0a 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -19,8 +19,8 @@ define i32 @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) {
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 4
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP4]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -34,13 +34,13 @@ define i32 @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4, !alias.scope [[META3]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], <i32 19, i32 19, i32 19, i32 19>
-; CHECK-NEXT: [[TMP12:%.*]] = xor <4 x i1> [[TMP8]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP13:%.*]] = and <4 x i1> [[TMP7]], [[TMP12]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD2]], <i32 4, i32 4, i32 4, i32 4>
-; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP9]], <4 x i32> <i32 4, i32 4, i32 4, i32 4>, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
-; CHECK-NEXT: [[TMP11:%.*]] = and <4 x i1> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>, <4 x i32> <i32 9, i32 9, i32 9, i32 9>
-; CHECK-NEXT: [[PREDPHI3:%.*]] = select <4 x i1> [[TMP13]], <4 x i32> [[TMP10]], <4 x i32> [[PREDPHI]]
+; CHECK-NEXT: [[TMP9:%.*]] = xor <4 x i1> [[TMP8]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP10:%.*]] = and <4 x i1> [[TMP7]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp slt <4 x i32> [[WIDE_LOAD2]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> <i32 4, i32 4, i32 4, i32 4>, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+; CHECK-NEXT: [[TMP13:%.*]] = and <4 x i1> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP13]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>, <4 x i32> <i32 9, i32 9, i32 9, i32 9>
+; CHECK-NEXT: [[PREDPHI3:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP12]], <4 x i32> [[PREDPHI]]
; CHECK-NEXT: store <4 x i32> [[PREDPHI3]], ptr [[TMP5]], align 4, !alias.scope [[META0]], !noalias [[META3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -71,7 +71,7 @@ define i32 @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) {
; CHECK-NEXT: store i32 [[X_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 50a5cc6774c5c..e77eff38b73f9 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -90,7 +90,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; IND-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; IND-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; IND-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; IND-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; IND-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; IND-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IND: for.end:
; IND-NEXT: ret void
@@ -134,7 +134,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; UNROLL-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; UNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; UNROLL-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; UNROLL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; UNROLL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; UNROLL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; UNROLL: for.end:
; UNROLL-NEXT: ret void
@@ -227,7 +227,7 @@ define void @multi_int_induction(ptr %A, i32 %N) {
; INTERLEAVE-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1
; INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; INTERLEAVE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; INTERLEAVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; INTERLEAVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; INTERLEAVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; INTERLEAVE: for.end:
; INTERLEAVE-NEXT: ret void
@@ -361,7 +361,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; IND-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IND-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; IND-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; IND: scalar.ph:
; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
@@ -374,7 +374,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; IND-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
; IND-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]]
; IND-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; IND-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]]
+; IND-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; IND-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; IND-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
; IND-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -428,7 +428,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; UNROLL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; UNROLL: scalar.ph:
; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
@@ -441,7 +441,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
; UNROLL-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]]
; UNROLL-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; UNROLL-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]]
+; UNROLL-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; UNROLL-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; UNROLL-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
; UNROLL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -571,7 +571,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; INTERLEAVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; INTERLEAVE: scalar.ph:
; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
@@ -584,7 +584,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
; INTERLEAVE-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]]
; INTERLEAVE-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; INTERLEAVE-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]]
+; INTERLEAVE-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; INTERLEAVE-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; INTERLEAVE-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -1636,7 +1636,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4
; IND-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]]
; IND-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
-; IND-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
+; IND-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; IND-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; IND-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; IND: vector.ph:
@@ -1676,7 +1676,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; IND-NEXT: store i32 [[TMP22]], ptr [[TMP23]], align 1
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; IND-NEXT: [[TMP24:%.*]] = trunc i64 [[I_NEXT]] to i32
-; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP24]], [[N]]
+; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP24]]
; IND-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IND: for.end:
; IND-NEXT: ret void
@@ -1699,7 +1699,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4
; UNROLL-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]]
; UNROLL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
-; UNROLL-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
+; UNROLL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; UNROLL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; UNROLL-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; UNROLL: vector.ph:
@@ -1753,7 +1753,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; UNROLL-NEXT: store i32 [[TMP33]], ptr [[TMP34]], align 1
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; UNROLL-NEXT: [[TMP35:%.*]] = trunc i64 [[I_NEXT]] to i32
-; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP35]]
; UNROLL-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; UNROLL: for.end:
; UNROLL-NEXT: ret void
@@ -1855,7 +1855,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4
; INTERLEAVE-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]]
; INTERLEAVE-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]]
-; INTERLEAVE-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
+; INTERLEAVE-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
; INTERLEAVE-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; INTERLEAVE-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; INTERLEAVE: vector.ph:
@@ -1920,7 +1920,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) {
; INTERLEAVE-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 1
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; INTERLEAVE-NEXT: [[TMP43:%.*]] = trunc i64 [[I_NEXT]] to i32
-; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP43]], [[N]]
+; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP43]]
; INTERLEAVE-NEXT: br i1 [[COND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; INTERLEAVE: for.end:
; INTERLEAVE-NEXT: ret void
@@ -2535,13 +2535,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; IND: for.body:
; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; IND-NEXT: [[TMP11:%.*]] = trunc i64 [[I]] to i32
-; IND-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], [[A]]
+; IND-NEXT: [[TMP12:%.*]] = add i32 [[A]], [[TMP11]]
; IND-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP12]] to i16
; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
; IND-NEXT: store i16 [[TMP13]], ptr [[TMP14]], align 2
; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; IND-NEXT: [[TMP15:%.*]] = trunc i64 [[I_NEXT]] to i32
-; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP15]], [[N]]
+; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP15]]
; IND-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IND: for.end:
; IND-NEXT: ret void
@@ -2594,13 +2594,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; UNROLL: for.body:
; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT: [[TMP19:%.*]] = trunc i64 [[I]] to i32
-; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], [[A]]
+; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[A]], [[TMP19]]
; UNROLL-NEXT: [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
; UNROLL-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
; UNROLL-NEXT: store i16 [[TMP21]], ptr [[TMP22]], align 2
; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; UNROLL-NEXT: [[TMP23:%.*]] = trunc i64 [[I_NEXT]] to i32
-; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP23]], [[N]]
+; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP23]]
; UNROLL-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; UNROLL: for.end:
; UNROLL-NEXT: ret void
@@ -2730,13 +2730,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) {
; INTERLEAVE: for.body:
; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; INTERLEAVE-NEXT: [[TMP31:%.*]] = trunc i64 [[I]] to i32
-; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], [[A]]
+; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[A]], [[TMP31]]
; INTERLEAVE-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
; INTERLEAVE-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1
; INTERLEAVE-NEXT: store i16 [[TMP33]], ptr [[TMP34]], align 2
; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; INTERLEAVE-NEXT: [[TMP35:%.*]] = trunc i64 [[I_NEXT]] to i32
-; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP35]], [[N]]
+; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP35]]
; INTERLEAVE-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; INTERLEAVE: for.end:
; INTERLEAVE-NEXT: ret void
@@ -3516,7 +3516,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; IND-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; IND-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; IND-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; IND-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; IND-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; IND-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; IND-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -3525,7 +3525,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; IND: vector.ph:
; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 510
; IND-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; IND-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; IND-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; IND-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT]], i64 0
; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
@@ -3535,7 +3535,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
-; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST4]], [[T]]
+; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
@@ -3582,7 +3582,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; UNROLL-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; UNROLL-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; UNROLL-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; UNROLL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; UNROLL-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -3591,7 +3591,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; UNROLL: vector.ph:
; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 508
; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; UNROLL-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT]], i64 0
; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
@@ -3602,7 +3602,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; UNROLL-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8
-; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]]
+; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]]
; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 8
@@ -3726,7 +3726,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; INTERLEAVE-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; INTERLEAVE-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; INTERLEAVE-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -3735,7 +3735,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 504
; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; INTERLEAVE-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[EXT]], i64 0
; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
@@ -3746,7 +3746,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; INTERLEAVE-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8
-; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]]
+; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16
@@ -3900,7 +3900,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; IND-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; IND-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; IND-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; IND-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; IND-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; IND-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; IND-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -3909,7 +3909,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; IND: vector.ph:
; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 510
; IND-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; IND-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; IND-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; IND-NEXT: [[EXT_MUL5:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; IND-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL5]], 2
; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT_MUL]], i64 0
@@ -3920,7 +3920,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8
-; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST4]], [[T]]
+; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]]
; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4
@@ -3969,7 +3969,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; UNROLL-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; UNROLL-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; UNROLL-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; UNROLL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; UNROLL-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -3978,7 +3978,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; UNROLL: vector.ph:
; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 508
; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; UNROLL-NEXT: [[EXT_MUL6:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; UNROLL-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL6]], 2
; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT_MUL]], i64 0
@@ -3990,7 +3990,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 8, i32 8>
; UNROLL-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8
-; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]]
+; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]]
; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 8
@@ -4119,7 +4119,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1
; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]]
; INTERLEAVE-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]]
; INTERLEAVE-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255
; INTERLEAVE-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
@@ -4128,7 +4128,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 504
; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8
-; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]]
+; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]]
; INTERLEAVE-NEXT: [[EXT_MUL6:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]]
; INTERLEAVE-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL6]], 2
; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[EXT_MUL]], i64 0
@@ -4140,7 +4140,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) {
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], <i32 16, i32 16, i32 16, i32 16>
; INTERLEAVE-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8
-; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]]
+; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]]
; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64
; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16
@@ -4262,7 +4262,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; IND-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; IND-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]]
+; IND-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; IND: scalar.ph:
; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ]
@@ -4299,7 +4299,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; UNROLL-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]]
+; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; UNROLL: scalar.ph:
; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ]
@@ -4376,7 +4376,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) {
; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]]
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; INTERLEAVE: scalar.ph:
; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ]
@@ -4424,14 +4424,14 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[K]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
; CHECK-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP8]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
@@ -4463,18 +4463,18 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; IND-NEXT: [[N_VEC:%.*]] = and i64 [[K]], 4294967294
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
-; IND-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; IND-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; IND-NEXT: [[TMP0:%.*]] = ashr exact i64 [[SEXT]], 32
; IND-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
-; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; IND-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IND-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]]
+; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]]
; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; IND: scalar.ph:
; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
@@ -4503,21 +4503,21 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; UNROLL-NEXT: [[N_VEC:%.*]] = and i64 [[K]], 4294967292
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
-; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
-; UNROLL-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; UNROLL-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; UNROLL-NEXT: [[TMP0:%.*]] = ashr exact i64 [[SEXT]], 32
; UNROLL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 8
; UNROLL-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
; UNROLL-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP2]], align 4
-; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
+; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 4, i32 4>
; UNROLL-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]]
+; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; UNROLL: scalar.ph:
; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
@@ -4551,10 +4551,10 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; UNROLL-NO-IC-NEXT: [[N_VEC:%.*]] = sub i64 [[K]], [[N_MOD_VF]]
; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-IC: vector.body:
-; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
-; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32
; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 0
; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = add i32 [[TMP5]], 2
; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP6]]
@@ -4563,7 +4563,7 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 2
; UNROLL-NO-IC-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP10]], align 4
; UNROLL-NO-IC-NEXT: store <2 x i32> [[STEP_ADD]], ptr [[TMP11]], align 4
-; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
+; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NO-IC-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[STEP_ADD]], <i32 2, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NO-IC-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
@@ -4595,21 +4595,21 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) {
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i64 [[K]], 4294967288
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
-; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
-; INTERLEAVE-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; INTERLEAVE-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; INTERLEAVE-NEXT: [[TMP0:%.*]] = ashr exact i64 [[SEXT]], 32
; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP1]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[STEP_ADD]], ptr [[TMP2]], align 4
-; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 8
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]]
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; INTERLEAVE: scalar.ph:
; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
@@ -4694,7 +4694,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; IND-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IND: vector.ph:
; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -2
-; IND-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]]
+; IND-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]]
; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[I]], i64 0
; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; IND-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], <i32 0, i32 1>
@@ -4702,7 +4702,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IND-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]]
+; IND-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; IND-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
; IND-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP2]], align 4
@@ -4734,7 +4734,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; UNROLL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL: vector.ph:
; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -4
-; UNROLL-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]]
+; UNROLL-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]]
; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[I]], i64 0
; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; UNROLL-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], <i32 0, i32 1>
@@ -4743,7 +4743,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; UNROLL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
-; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]]
+; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; UNROLL-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 8
@@ -4823,7 +4823,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; INTERLEAVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -8
-; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]]
+; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]]
; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[I]], i64 0
; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; INTERLEAVE-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[DOTSPLAT]], <i32 0, i32 1, i32 2, i32 3>
@@ -4832,7 +4832,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) {
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
-; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]]
+; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]]
; INTERLEAVE-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64
; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]]
; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 16
@@ -5951,10 +5951,10 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[VEC_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 0
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[SRC:%.*]], align 4
@@ -5965,7 +5965,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; CHECK-NEXT: [[TMP6:%.*]] = add <2 x i32> [[VEC_IND]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP5]], i32 0
; CHECK-NEXT: store <2 x i32> [[TMP6]], ptr [[TMP7]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]]
@@ -6000,7 +6000,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; IND: vector.ph:
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
-; IND-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IND-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[VEC_IND:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[VEC_IND]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; IND-NEXT: [[TMP0:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
@@ -6008,12 +6008,12 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; IND-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[TMP1]], i64 0
; IND-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; IND-NEXT: [[TMP2:%.*]] = mul nsw <2 x i32> [[BROADCAST_SPLAT]], [[TMP0]]
-; IND-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; IND-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; IND-NEXT: [[TMP3:%.*]] = ashr exact i64 [[SEXT]], 32
; IND-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST:%.*]], i64 [[TMP3]]
; IND-NEXT: [[TMP5:%.*]] = add <2 x i32> [[VEC_IND]], [[TMP2]]
; IND-NEXT: store <2 x i32> [[TMP5]], ptr [[TMP4]], align 4
-; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; IND-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; IND-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; IND-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; IND-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]]
@@ -6032,7 +6032,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL: vector.ph:
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
-; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STEP_ADD:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NEXT: [[STEP_ADD]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
@@ -6043,7 +6043,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT3]], <2 x i32> poison, <2 x i32> zeroinitializer
; UNROLL-NEXT: [[TMP3:%.*]] = mul nsw <2 x i32> [[BROADCAST_SPLAT4]], [[TMP0]]
; UNROLL-NEXT: [[TMP4:%.*]] = mul nsw <2 x i32> [[BROADCAST_SPLAT4]], [[TMP1]]
-; UNROLL-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; UNROLL-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; UNROLL-NEXT: [[TMP5:%.*]] = ashr exact i64 [[SEXT]], 32
; UNROLL-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[DST:%.*]], i64 [[TMP5]]
; UNROLL-NEXT: [[TMP7:%.*]] = add <2 x i32> [[VEC_IND]], [[TMP3]]
@@ -6051,7 +6051,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP6]], i64 8
; UNROLL-NEXT: store <2 x i32> [[TMP7]], ptr [[TMP6]], align 4
; UNROLL-NEXT: store <2 x i32> [[TMP8]], ptr [[TMP9]], align 4
-; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
+; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 4, i32 4>
; UNROLL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; UNROLL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]]
@@ -6070,11 +6070,11 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NO-IC: vector.ph:
; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-IC: vector.body:
-; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <2 x i32> [ <i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STEP_ADD:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[STEP_ADD]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
-; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = trunc i64 [[INDEX]] to i32
; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 0
; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], 2
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = shufflevector <2 x i32> [[VECTOR_RECUR]], <2 x i32> [[VEC_IND]], <2 x i32> <i32 1, i32 2>
@@ -6092,7 +6092,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 2
; UNROLL-NO-IC-NEXT: store <2 x i32> [[TMP10]], ptr [[TMP12]], align 4
; UNROLL-NO-IC-NEXT: store <2 x i32> [[TMP11]], ptr [[TMP13]], align 4
-; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
+; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; UNROLL-NO-IC-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[STEP_ADD]], <i32 2, i32 2>
; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; UNROLL-NO-IC-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]]
@@ -6127,7 +6127,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
-; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 0>, [[VECTOR_PH]] ], [ [[STEP_ADD:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; INTERLEAVE-NEXT: [[STEP_ADD]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
@@ -6138,7 +6138,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; INTERLEAVE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT3]], <4 x i32> poison, <4 x i32> zeroinitializer
; INTERLEAVE-NEXT: [[TMP3:%.*]] = mul nsw <4 x i32> [[BROADCAST_SPLAT4]], [[TMP0]]
; INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[BROADCAST_SPLAT4]], [[TMP1]]
-; INTERLEAVE-NEXT: [[SEXT:%.*]] = shl i64 [[OFFSET_IDX]], 32
+; INTERLEAVE-NEXT: [[SEXT:%.*]] = shl i64 [[INDEX]], 32
; INTERLEAVE-NEXT: [[TMP5:%.*]] = ashr exact i64 [[SEXT]], 32
; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[DST:%.*]], i64 [[TMP5]]
; INTERLEAVE-NEXT: [[TMP7:%.*]] = add <4 x i32> [[VEC_IND]], [[TMP3]]
@@ -6146,7 +6146,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr %
; INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16
; INTERLEAVE-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP6]], align 4
; INTERLEAVE-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4
-; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 8
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8>
; INTERLEAVE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
; INTERLEAVE-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]]
@@ -6307,7 +6307,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; IND: vector.ph:
; IND-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -2
; IND-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; IND-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]]
+; IND-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]]
; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0
; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; IND-NEXT: [[TMP15:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], <i32 0, i32 1>
@@ -6327,7 +6327,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; IND-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IND-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]]
; IND: middle.block:
-; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; IND-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <2 x i32> [[VEC_IND]], i64 1
; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; IND: scalar.ph:
@@ -6378,7 +6378,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL: vector.ph:
; UNROLL-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -4
; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]]
+; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]]
; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0
; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; UNROLL-NEXT: [[TMP15:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], <i32 0, i32 1>
@@ -6402,7 +6402,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; UNROLL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]]
; UNROLL: middle.block:
-; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; UNROLL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <2 x i32> [[STEP_ADD]], i64 1
; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; UNROLL: scalar.ph:
@@ -6536,7 +6536,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -8
; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]]
+; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]]
; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0
; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; INTERLEAVE-NEXT: [[TMP15:%.*]] = mul <4 x i32> [[DOTSPLAT]], <i32 0, i32 1, i32 2, i32 3>
@@ -6560,7 +6560,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n
; INTERLEAVE-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; INTERLEAVE-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]]
; INTERLEAVE: middle.block:
-; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; INTERLEAVE-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[STEP_ADD]], i64 3
; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; INTERLEAVE: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 4c3377255b21a..8182c3c536b8d 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -108,23 +108,23 @@ define void @test_struct_array_load3_store3() {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 12
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr @A, i64 [[TMP0]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 12
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr @A, i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
-; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], <i32 1, i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC2]], <i32 2, i32 2, i32 2, i32 2>
-; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], <i32 3, i32 3, i32 3, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> [[TMP6]], <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-; CHECK-NEXT: store <12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[STRIDED_VEC2]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> [[TMP5]], <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+; CHECK-NEXT: store <12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -266,21 +266,21 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP2:%.*]] = shl nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
-; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 3, i32 3, i32 3, i32 3>
-; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 4, i32 4, i32 4, i32 4>
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP6]], <8 x i32> [[TMP7]], <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-; CHECK-NEXT: store <16 x i32> [[INTERLEAVED_VEC]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP1:%.*]] = shl nsw <4 x i32> [[WIDE_LOAD]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ST4:%.*]], ptr [[B:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> [[TMP6]], <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+; CHECK-NEXT: store <16 x i32> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -441,7 +441,7 @@ define void @even_load_static_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[TMP1]] = lshr exact i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
@@ -518,7 +518,7 @@ define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[TMP1]] = lshr exact i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]]
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
@@ -736,23 +736,23 @@ define void @mixed_load3_store3(ptr nocapture %A) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 12
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 12
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
-; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[STRIDED_VEC]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i32> [[STRIDED_VEC2]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP3:%.*]] = add <4 x i32> [[STRIDED_VEC3]], [[VEC_IND]]
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP4]], <8 x i32> [[TMP5]], <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+; CHECK-NEXT: [[TMP0:%.*]] = add <4 x i32> [[STRIDED_VEC]], [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i32> [[STRIDED_VEC2]], [[VEC_IND]]
+; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i32> [[STRIDED_VEC3]], [[VEC_IND]]
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> [[TMP4]], <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
; CHECK-NEXT: store <12 x i32> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -837,14 +837,14 @@ define void @int_float_struct(ptr nocapture readonly %A) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP5:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi float [ poison, [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ poison, [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi float [ poison, [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr @SA, align 4
; CHECK-NEXT: store float [[ADD3_LCSSA]], ptr @SB, align 4
; CHECK-NEXT: ret void
@@ -1481,7 +1481,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 2
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[TMP5]], i64 6
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
index 50c67040cfb2a..45de11141235e 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
@@ -28,8 +28,8 @@ define void @inv_val_store_to_inv_address_conditional_diff_values_ic(ptr %a, i64
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -120,7 +120,7 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b
; CHECK-LABEL: @inv_val_store_to_inv_address_conditional_inv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[NTRUNC]], [[K:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[K:%.*]], [[NTRUNC]]
; CHECK-NEXT: [[SMAX2:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp slt i64 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
@@ -128,8 +128,8 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -217,8 +217,8 @@ define i32 @variant_val_store_to_inv_address(ptr %a, i64 %n, ptr %b, i32 %k) {
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
index 20d612a548b15..63381454cc590 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
@@ -27,8 +27,8 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -101,8 +101,8 @@ define void @inv_val_store_to_inv_address(ptr %a, i64 %n, ptr %b) {
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -176,8 +176,8 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
@@ -360,7 +360,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[J_022]] to i64
; CHECK-NEXT: [[ARRAYIDX5_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = xor i32 [[J_022]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[ITR]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[ITR]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw i64 [[TMP7]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP6]], 3
@@ -369,12 +369,12 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP4]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[VAR2]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[J_022]], -1
-; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], [[ITR]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[ITR]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i64 [[TMP4]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 2
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SCEVGEP2]], i64 [[TMP14]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP3]], [[VAR1]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[VAR1]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
@@ -414,7 +414,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: store i32 [[TMP22]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_INC8_LOOPEXIT_LOOPEXIT:%.*]], label [[FOR_BODY3]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: for.inc8.loopexit.loopexit:
; CHECK-NEXT: br label [[FOR_INC8_LOOPEXIT]]
@@ -424,7 +424,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: [[J_1_LCSSA]] = phi i32 [ [[J_022]], [[FOR_COND1_PREHEADER]] ], [ [[ITR]], [[FOR_INC8_LOOPEXIT]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT24]] = add nuw nsw i64 [[INDVARS_IV23]], 1
; CHECK-NEXT: [[LFTR_WIDEIV25:%.*]] = trunc i64 [[INDVARS_IV_NEXT24]] to i32
-; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[LFTR_WIDEIV25]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV25]]
; CHECK-NEXT: br i1 [[EXITCOND26]], label [[FOR_END10_LOOPEXIT:%.*]], label [[FOR_COND1_PREHEADER]]
; CHECK: for.end10.loopexit:
; CHECK-NEXT: br label [[FOR_END10]]
@@ -507,7 +507,7 @@ define i32 @multiple_uniform_stores_conditional(ptr nocapture %var1, ptr nocaptu
; CHECK-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_INC8_LOOPEXIT:%.*]], label [[FOR_BODY3]]
; CHECK: for.inc8.loopexit:
; CHECK-NEXT: br label [[FOR_INC8]]
@@ -515,7 +515,7 @@ define i32 @multiple_uniform_stores_conditional(ptr nocapture %var1, ptr nocaptu
; CHECK-NEXT: [[J_1_LCSSA]] = phi i32 [ [[J_022]], [[FOR_COND1_PREHEADER]] ], [ [[ITR]], [[FOR_INC8_LOOPEXIT]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT24]] = add nuw nsw i64 [[INDVARS_IV23]], 1
; CHECK-NEXT: [[LFTR_WIDEIV25:%.*]] = trunc i64 [[INDVARS_IV_NEXT24]] to i32
-; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[LFTR_WIDEIV25]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV25]]
; CHECK-NEXT: br i1 [[EXITCOND26]], label [[FOR_END10_LOOPEXIT:%.*]], label [[FOR_COND1_PREHEADER]]
; CHECK: for.end10.loopexit:
; CHECK-NEXT: br label [[FOR_END10]]
@@ -589,7 +589,7 @@ define void @unsafe_dep_uniform_load_store(i32 %arg, i32 %arg1, i64 %arg2, ptr %
; CHECK-NEXT: [[I13:%.*]] = add nsw i32 [[I12]], [[I9]]
; CHECK-NEXT: [[I14:%.*]] = trunc i32 [[I13]] to i16
; CHECK-NEXT: [[I15:%.*]] = trunc i64 [[I8]] to i32
-; CHECK-NEXT: [[I16:%.*]] = add i32 [[I15]], [[ARG:%.*]]
+; CHECK-NEXT: [[I16:%.*]] = add i32 [[ARG:%.*]], [[I15]]
; CHECK-NEXT: [[I17:%.*]] = zext i32 [[I16]] to i64
; CHECK-NEXT: [[I18:%.*]] = getelementptr inbounds i16, ptr [[I6]], i64 [[I17]]
; CHECK-NEXT: store i16 [[I14]], ptr [[I18]], align 2
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index 873f6364f8281..c50bcf8ae88f5 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -61,7 +61,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -176,7 +176,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -294,7 +294,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -428,7 +428,7 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -597,7 +597,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP50:%.*]] = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> [[PREDPHI15]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -732,7 +732,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -896,7 +896,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP49:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI15]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1038,7 +1038,7 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PREDPHI]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1167,7 +1167,7 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP28]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index e6936b19415d0..a226a5a36d63b 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -989,7 +989,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1132,7 +1132,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[I]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END7:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1221,7 +1221,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[I]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END7:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1292,7 +1292,7 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[TMP51:%.*]], [[PRED_LOAD_CONTINUE6]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[PRED_LOAD_CONTINUE6]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i32 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i32 [[INDEX]], 3
@@ -1354,21 +1354,21 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
; CHECK: pred.load.continue6:
; CHECK-NEXT: [[TMP43:%.*]] = phi <4 x i32> [ [[TMP37]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP42]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP44:%.*]] = icmp ne <4 x i32> [[TMP43]], zeroinitializer
-; CHECK-NEXT: [[TMP46:%.*]] = xor <4 x i1> [[TMP19]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP47:%.*]] = select <4 x i1> [[TMP46]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP44]]
-; CHECK-NEXT: [[TMP48:%.*]] = bitcast <4 x i1> [[TMP47]] to i4
-; CHECK-NEXT: [[TMP49:%.*]] = call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 [[TMP48]])
-; CHECK-NEXT: [[TMP50:%.*]] = zext nneg i4 [[TMP49]] to i32
-; CHECK-NEXT: [[TMP51]] = add i32 [[VEC_PHI]], [[TMP50]]
+; CHECK-NEXT: [[NOT_:%.*]] = xor <4 x i1> [[TMP19]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[DOTNOT7:%.*]] = select <4 x i1> [[NOT_]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP44]]
+; CHECK-NEXT: [[TMP45:%.*]] = bitcast <4 x i1> [[DOTNOT7]] to i4
+; CHECK-NEXT: [[TMP46:%.*]] = call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 [[TMP45]])
+; CHECK-NEXT: [[TMP47:%.*]] = zext nneg i4 [[TMP46]] to i32
+; CHECK-NEXT: [[TMP48]] = add i32 [[VEC_PHI]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP52:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ poison, [[FOR_INC:%.*]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ poison, [[FOR_INC:%.*]] ], [ [[TMP48]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[A_1_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: br i1 poison, label [[LOR_LHS_FALSE:%.*]], label [[IF_THEN:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index b66ce4047ad95..89fd1a9a73f2f 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -49,7 +49,7 @@ define i32 @reduction_sum(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP13]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -130,7 +130,7 @@ define i32 @reduction_prod(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[TMP17]] = mul i32 [[TMP16]], [[TMP13]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -211,7 +211,7 @@ define i32 @reduction_mix(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP14]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -292,7 +292,7 @@ define i32 @reduction_mul(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[TMP17]] = mul i32 [[TMP16]], [[SUM_02]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -369,7 +369,7 @@ define i32 @start_at_non_zero(ptr %in, ptr %coeff, ptr %out, i32 %n) {
; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -445,7 +445,7 @@ define i32 @reduction_and(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[RESULT_08]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -521,7 +521,7 @@ define i32 @reduction_or(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -597,7 +597,7 @@ define i32 @reduction_xor(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -646,7 +646,7 @@ define i32 @reduction_sub_rhs(i32 %n, ptr %A) {
; CHECK-NEXT: [[SUB]] = sub nsw i32 [[TMP0]], [[X_05]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
@@ -714,7 +714,7 @@ define i32 @reduction_sub_lhs(i32 %n, ptr %A) {
; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[TMP5]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
@@ -1083,7 +1083,7 @@ define i32 @reduction_sum_multiuse(i32 %n, ptr %A, ptr %B) {
; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP13]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: ._crit_edge:
; CHECK-NEXT: [[SUM_COPY:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index d5df8afc80a79..9521c0933fe87 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -53,7 +53,7 @@ define i32 @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ssp
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX2]], align 4, !dbg [[DBG9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1, !dbg [[DBG9]]
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32, !dbg [[DBG9]]
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]], !dbg [[DBG9]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]], !dbg [[DBG9]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !dbg [[DBG9]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]], !dbg [[DBG14:![0-9]+]]
@@ -144,7 +144,7 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
@@ -157,7 +157,7 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]]
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; CHECK-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]]
+; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; CHECK-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -231,7 +231,7 @@ define void @test_runtime_check2(ptr %a, float %b, i64 %offset, i64 %offset2, i6
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP1]], i64 [[OFFSET2:%.*]]
; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; CHECK-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B:%.*]]
+; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B:%.*]], [[L2]]
; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
; CHECK-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[C:%.*]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
index 1b9f15a419ea3..861285e30575a 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -12,7 +12,7 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -94,7 +94,7 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -180,7 +180,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -257,7 +257,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
index 629b15c824f67..63ca45495335f 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
@@ -23,7 +23,7 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -87,7 +87,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/PGOProfile/chr.ll b/llvm/test/Transforms/PGOProfile/chr.ll
index 38e8f8536a19c..34e39fe37979a 100644
--- a/llvm/test/Transforms/PGOProfile/chr.ll
+++ b/llvm/test/Transforms/PGOProfile/chr.ll
@@ -1931,15 +1931,15 @@ bb4:
define i32 @test_chr_21(i64 %i, i64 %k, i64 %j) !prof !14 {
; CHECK-LABEL: @test_chr_21(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[J_FR:%.*]] = freeze i64 [[J:%.*]]
; CHECK-NEXT: [[I_FR:%.*]] = freeze i64 [[I:%.*]]
-; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i64 [[J_FR]], [[K:%.*]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i64 [[J:%.*]], [[K:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = freeze i1 [[CMP0]]
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[I_FR]], [[J_FR]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[J]], [[I_FR]]
; CHECK-NEXT: [[CMP_I:%.*]] = icmp ne i64 [[I_FR]], 86
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[CMP3]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[CMP_I]]
-; CHECK-NEXT: br i1 [[TMP2]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
+; CHECK-NEXT: [[TMP1:%.*]] = freeze i1 [[CMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[TMP2]], [[CMP_I]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
; CHECK: bb1:
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[I_FR]], 2
; CHECK-NEXT: switch i64 [[I_FR]], label [[BB2:%.*]] [
@@ -1971,7 +1971,7 @@ define i32 @test_chr_21(i64 %i, i64 %k, i64 %j) !prof !14 {
; CHECK-NEXT: [[CMP_I_NONCHR:%.*]] = icmp eq i64 [[I_FR]], 86
; CHECK-NEXT: br i1 [[CMP_I_NONCHR]], label [[BB6_NONCHR:%.*]], label [[BB4_NONCHR:%.*]], !prof [[PROF16]]
; CHECK: bb6.nonchr:
-; CHECK-NEXT: [[CMP3_NONCHR:%.*]] = icmp eq i64 [[J_FR]], [[I_FR]]
+; CHECK-NEXT: [[CMP3_NONCHR:%.*]] = icmp eq i64 [[J]], [[I_FR]]
; CHECK-NEXT: br i1 [[CMP3_NONCHR]], label [[BB8_NONCHR:%.*]], label [[BB7_NONCHR:%.*]], !prof [[PROF16]]
; CHECK: bb8.nonchr:
; CHECK-NEXT: br i1 [[CMP_I_NONCHR]], label [[BB10]], label [[BB9_NONCHR:%.*]], !prof [[PROF16]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index 55dd28b70170b..0590ee43a46ce 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -13,7 +13,7 @@ define i32 @read_only_loop_with_runtime_check(ptr noundef %array, i32 noundef %c
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[N]], -1
-; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ult i32 [[TMP1]], [[COUNT]]
+; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ugt i32 [[COUNT]], [[TMP1]]
; CHECK-NEXT: br i1 [[DOTNOT_NOT]], label [[FOR_BODY_PREHEADER10:%.*]], label [[IF_THEN:%.*]]
; CHECK: for.body.preheader10:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
@@ -128,7 +128,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
; CHECK-NEXT: br i1 [[CMP5_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
-; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ult i64 [[TMP0]], [[S_COERCE1]]
+; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ugt i64 [[S_COERCE1]], [[TMP0]]
; CHECK-NEXT: br i1 [[DOTNOT_NOT]], label [[ENTRY:%.*]], label [[COND_FALSE_I:%.*]], !prof [[PROF4:![0-9]+]]
; CHECK: for.body.preheader8:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
@@ -152,7 +152,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[ADD:%.*]] = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_PREHEADER11]]
; CHECK: for.body.preheader11:
; CHECK-NEXT: [[I_07_PH:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[N_VEC]], [[SPAN_CHECKED_ACCESS_EXIT]] ]
@@ -227,7 +227,7 @@ define hidden noundef nonnull align 4 dereferenceable(4) ptr @span_checked_acces
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__SIZE__I:%.*]] = getelementptr inbounds i8, ptr [[THIS]], i64 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[__SIZE__I]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], [[__IDX]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[__IDX]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP]], label [[COND_END:%.*]], label [[COND_FALSE:%.*]], !prof [[PROF4]]
; CHECK: cond.false:
; CHECK-NEXT: tail call void @llvm.trap()
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index ad100c399c08e..33bcab679ba91 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -134,11 +134,11 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 40000
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 40000
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 40000
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[C]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND04:%.*]] = icmp ugt ptr [[SCEVGEP3]], [[B]]
-; CHECK-NEXT: [[BOUND15:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND04:%.*]] = icmp ult ptr [[B]], [[SCEVGEP3]]
+; CHECK-NEXT: [[BOUND15:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT6:%.*]] = and i1 [[BOUND04]], [[BOUND15]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT6]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[LOOP_BODY:%.*]], label [[VECTOR_PH:%.*]]
@@ -158,8 +158,8 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16
; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope [[META7:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope [[META7]]
-; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[WIDE_LOAD9]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD8]]
+; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD9]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META11:![0-9]+]]
@@ -181,7 +181,7 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[C_LV]], 20
; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; CHECK-NEXT: [[A_LV_0:%.*]] = load float, ptr [[A_GEP_0]], align 4
-; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[A_LV_0]], [[X]]
+; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[X]], [[A_LV_0]]
; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[ELSE:%.*]]
; CHECK: else:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
index 8fc5189e8bc79..e008bd1bc7ee4 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
@@ -9,79 +9,31 @@
define i64 @sum_2_at_with_int_conversion(ptr %A, ptr %B, i64 %N) {
; CHECK-LABEL: @sum_2_at_with_int_conversion(
-; CHECK-NEXT: at_with_int_conversion.exit11.peel:
+; CHECK-NEXT: entry:
; CHECK-NEXT: [[START_I:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[GEP_END_I:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: [[END_I:%.*]] = load ptr, ptr [[GEP_END_I]], align 8
; CHECK-NEXT: [[START_INT_I:%.*]] = ptrtoint ptr [[START_I]] to i64
; CHECK-NEXT: [[END_INT_I:%.*]] = ptrtoint ptr [[END_I]] to i64
; CHECK-NEXT: [[SUB_I:%.*]] = sub i64 [[END_INT_I]], [[START_INT_I]]
+; CHECK-NEXT: [[START_I1:%.*]] = load ptr, ptr [[B:%.*]], align 8
+; CHECK-NEXT: [[GEP_END_I2:%.*]] = getelementptr i8, ptr [[B]], i64 8
+; CHECK-NEXT: [[END_I3:%.*]] = load ptr, ptr [[GEP_END_I2]], align 8
+; CHECK-NEXT: [[START_INT_I4:%.*]] = ptrtoint ptr [[START_I1]] to i64
+; CHECK-NEXT: [[END_INT_I5:%.*]] = ptrtoint ptr [[END_I3]] to i64
+; CHECK-NEXT: [[SUB_I6:%.*]] = sub i64 [[END_INT_I5]], [[START_INT_I4]]
; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 0)
-; CHECK-NEXT: [[GEP_END_I2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 8
-; CHECK-NEXT: [[START_I1_PEEL:%.*]] = load ptr, ptr [[B]], align 8
-; CHECK-NEXT: [[END_I3_PEEL:%.*]] = load ptr, ptr [[GEP_END_I2]], align 8
-; CHECK-NEXT: [[START_INT_I4_PEEL:%.*]] = ptrtoint ptr [[START_I1_PEEL]] to i64
-; CHECK-NEXT: [[END_INT_I5_PEEL:%.*]] = ptrtoint ptr [[END_I3_PEEL]] to i64
-; CHECK-NEXT: [[SUB_I6_PEEL:%.*]] = sub i64 [[END_INT_I5_PEEL]], [[START_INT_I4_PEEL]]
-; CHECK-NEXT: [[LV_I_PEEL:%.*]] = load i64, ptr [[START_I]], align 8
-; CHECK-NEXT: [[LV_I9_PEEL:%.*]] = load i64, ptr [[START_I1_PEEL]], align 8
-; CHECK-NEXT: [[SUM_NEXT_PEEL:%.*]] = add i64 [[LV_I_PEEL]], [[LV_I9_PEEL]]
-; CHECK-NEXT: [[EXITCOND_PEEL_NOT:%.*]] = icmp slt i64 [[N]], 1
-; CHECK-NEXT: br i1 [[EXITCOND_PEEL_NOT]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
-; CHECK: loop.preheader:
-; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -1
-; CHECK-NEXT: [[UMIN:%.*]] = tail call i64 @llvm.umin.i64(i64 [[SUB_I6_PEEL]], i64 [[TMP0]])
-; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[UMIN]]
-; CHECK-NEXT: [[UMIN15:%.*]] = tail call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[SUB_I]])
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN15]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 5
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER20:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[TMP2]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 4, i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> <i64 poison, i64 0>, i64 [[SUM_NEXT_PEEL]], i64 0
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP5]], [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI16:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 8
-; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[START_I1_PEEL]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
-; CHECK-NEXT: [[WIDE_LOAD18:%.*]] = load <2 x i64>, ptr [[TMP8]], align 8
-; CHECK-NEXT: [[WIDE_LOAD19:%.*]] = load <2 x i64>, ptr [[TMP9]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP11:%.*]] = add <2 x i64> [[WIDE_LOAD17]], [[VEC_PHI16]]
-; CHECK-NEXT: [[TMP12]] = add <2 x i64> [[TMP10]], [[WIDE_LOAD18]]
-; CHECK-NEXT: [[TMP13]] = add <2 x i64> [[TMP11]], [[WIDE_LOAD19]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[TMP13]], [[TMP12]]
-; CHECK-NEXT: [[TMP15:%.*]] = tail call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
-; CHECK-NEXT: br label [[LOOP_PREHEADER20]]
-; CHECK: loop.preheader20:
-; CHECK-NEXT: [[IV_PH:%.*]] = phi i64 [ 1, [[LOOP_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[SUM_PH:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[LOOP_PREHEADER]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT11:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER20]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT11]] ], [ [[SUM_PH]], [[LOOP_PREHEADER20]] ]
-; CHECK-NEXT: [[INRANGE_I:%.*]] = icmp ult i64 [[SUB_I]], [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT11:%.*]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT11]] ]
+; CHECK-NEXT: [[INRANGE_I:%.*]] = icmp ugt i64 [[IV]], [[SUB_I]]
; CHECK-NEXT: br i1 [[INRANGE_I]], label [[ERROR_I:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT:%.*]]
; CHECK: error.i:
; CHECK-NEXT: tail call void @error()
; CHECK-NEXT: unreachable
; CHECK: at_with_int_conversion.exit:
-; CHECK-NEXT: [[INRANGE_I7:%.*]] = icmp ult i64 [[SUB_I6_PEEL]], [[IV]]
+; CHECK-NEXT: [[INRANGE_I7:%.*]] = icmp ugt i64 [[IV]], [[SUB_I6]]
; CHECK-NEXT: br i1 [[INRANGE_I7]], label [[ERROR_I10:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT11]]
; CHECK: error.i10:
; CHECK-NEXT: tail call void @error()
@@ -89,16 +41,15 @@ define i64 @sum_2_at_with_int_conversion(ptr %A, ptr %B, i64 %N) {
; CHECK: at_with_int_conversion.exit11:
; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[IV]]
; CHECK-NEXT: [[LV_I:%.*]] = load i64, ptr [[GEP_IDX_I]], align 8
-; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1_PEEL]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[IV]]
; CHECK-NEXT: [[LV_I9:%.*]] = load i64, ptr [[GEP_IDX_I8]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[LV_I]], [[SUM]]
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[ADD]], [[LV_I9]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[SMAX]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[AT_WITH_INT_CONVERSION_EXIT11_PEEL:%.*]] ], [ [[SUM_NEXT]], [[AT_WITH_INT_CONVERSION_EXIT11]] ]
-; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i64 [[SUM_NEXT]]
;
entry:
br label %loop
@@ -120,89 +71,31 @@ exit:
define i64 @sum_3_at_with_int_conversion(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK-LABEL: @sum_3_at_with_int_conversion(
-; CHECK-NEXT: at_with_int_conversion.exit22.peel:
+; CHECK-NEXT: entry:
; CHECK-NEXT: [[START_I:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[GEP_END_I:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: [[END_I:%.*]] = load ptr, ptr [[GEP_END_I]], align 8
; CHECK-NEXT: [[START_INT_I:%.*]] = ptrtoint ptr [[START_I]] to i64
; CHECK-NEXT: [[END_INT_I:%.*]] = ptrtoint ptr [[END_I]] to i64
; CHECK-NEXT: [[SUB_I:%.*]] = sub i64 [[END_INT_I]], [[START_INT_I]]
-; CHECK-NEXT: [[GEP_END_I13:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 8
+; CHECK-NEXT: [[START_I1:%.*]] = load ptr, ptr [[B:%.*]], align 8
+; CHECK-NEXT: [[GEP_END_I2:%.*]] = getelementptr i8, ptr [[B]], i64 8
+; CHECK-NEXT: [[END_I3:%.*]] = load ptr, ptr [[GEP_END_I2]], align 8
+; CHECK-NEXT: [[START_INT_I4:%.*]] = ptrtoint ptr [[START_I1]] to i64
+; CHECK-NEXT: [[END_INT_I5:%.*]] = ptrtoint ptr [[END_I3]] to i64
+; CHECK-NEXT: [[SUB_I6:%.*]] = sub i64 [[END_INT_I5]], [[START_INT_I4]]
+; CHECK-NEXT: [[START_I12:%.*]] = load ptr, ptr [[C:%.*]], align 8
+; CHECK-NEXT: [[GEP_END_I13:%.*]] = getelementptr i8, ptr [[C]], i64 8
+; CHECK-NEXT: [[END_I14:%.*]] = load ptr, ptr [[GEP_END_I13]], align 8
+; CHECK-NEXT: [[START_INT_I15:%.*]] = ptrtoint ptr [[START_I12]] to i64
+; CHECK-NEXT: [[END_INT_I16:%.*]] = ptrtoint ptr [[END_I14]] to i64
+; CHECK-NEXT: [[SUB_I17:%.*]] = sub i64 [[END_INT_I16]], [[START_INT_I15]]
; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 0)
-; CHECK-NEXT: [[GEP_END_I2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 8
-; CHECK-NEXT: [[LV_I_PEEL:%.*]] = load i64, ptr [[START_I]], align 8
-; CHECK-NEXT: [[START_I1_PEEL:%.*]] = load ptr, ptr [[B]], align 8
-; CHECK-NEXT: [[END_I3_PEEL:%.*]] = load ptr, ptr [[GEP_END_I2]], align 8
-; CHECK-NEXT: [[START_INT_I4_PEEL:%.*]] = ptrtoint ptr [[START_I1_PEEL]] to i64
-; CHECK-NEXT: [[END_I3_PEEL_FR:%.*]] = freeze ptr [[END_I3_PEEL]]
-; CHECK-NEXT: [[END_INT_I5_PEEL:%.*]] = ptrtoint ptr [[END_I3_PEEL_FR]] to i64
-; CHECK-NEXT: [[SUB_I6_PEEL:%.*]] = sub i64 [[END_INT_I5_PEEL]], [[START_INT_I4_PEEL]]
-; CHECK-NEXT: [[START_I12_PEEL:%.*]] = load ptr, ptr [[C]], align 8
-; CHECK-NEXT: [[END_I14_PEEL:%.*]] = load ptr, ptr [[GEP_END_I13]], align 8
-; CHECK-NEXT: [[START_INT_I15_PEEL:%.*]] = ptrtoint ptr [[START_I12_PEEL]] to i64
-; CHECK-NEXT: [[END_INT_I16_PEEL:%.*]] = ptrtoint ptr [[END_I14_PEEL]] to i64
-; CHECK-NEXT: [[SUB_I17_PEEL:%.*]] = sub i64 [[END_INT_I16_PEEL]], [[START_INT_I15_PEEL]]
-; CHECK-NEXT: [[LV_I9_PEEL:%.*]] = load i64, ptr [[START_I1_PEEL]], align 8
-; CHECK-NEXT: [[LV_I20_PEEL:%.*]] = load i64, ptr [[START_I12_PEEL]], align 8
-; CHECK-NEXT: [[ADD_2_PEEL:%.*]] = add i64 [[LV_I_PEEL]], [[LV_I9_PEEL]]
-; CHECK-NEXT: [[SUM_NEXT_PEEL:%.*]] = add i64 [[ADD_2_PEEL]], [[LV_I20_PEEL]]
-; CHECK-NEXT: [[EXITCOND_PEEL_NOT:%.*]] = icmp slt i64 [[N]], 1
-; CHECK-NEXT: br i1 [[EXITCOND_PEEL_NOT]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
-; CHECK: loop.preheader:
-; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -1
-; CHECK-NEXT: [[UMIN:%.*]] = tail call i64 @llvm.umin.i64(i64 [[SUB_I17_PEEL]], i64 [[TMP0]])
-; CHECK-NEXT: [[TMP1:%.*]] = freeze i64 [[UMIN]]
-; CHECK-NEXT: [[UMIN26:%.*]] = tail call i64 @llvm.umin.i64(i64 [[TMP1]], i64 [[SUB_I6_PEEL]])
-; CHECK-NEXT: [[UMIN27:%.*]] = tail call i64 @llvm.umin.i64(i64 [[UMIN26]], i64 [[SUB_I]])
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN27]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 5
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER34:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[TMP2]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 4, i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> <i64 poison, i64 0>, i64 [[SUM_NEXT_PEEL]], i64 0
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP5]], [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI28:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i64 16
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 8
-; CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[START_I1_PEEL]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
-; CHECK-NEXT: [[WIDE_LOAD30:%.*]] = load <2 x i64>, ptr [[TMP8]], align 8
-; CHECK-NEXT: [[WIDE_LOAD31:%.*]] = load <2 x i64>, ptr [[TMP9]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[START_I12_PEEL]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP10]], i64 16
-; CHECK-NEXT: [[WIDE_LOAD32:%.*]] = load <2 x i64>, ptr [[TMP10]], align 8
-; CHECK-NEXT: [[WIDE_LOAD33:%.*]] = load <2 x i64>, ptr [[TMP11]], align 8
-; CHECK-NEXT: [[TMP12:%.*]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP13:%.*]] = add <2 x i64> [[WIDE_LOAD29]], [[VEC_PHI28]]
-; CHECK-NEXT: [[TMP14:%.*]] = add <2 x i64> [[TMP12]], [[WIDE_LOAD30]]
-; CHECK-NEXT: [[TMP15:%.*]] = add <2 x i64> [[TMP13]], [[WIDE_LOAD31]]
-; CHECK-NEXT: [[TMP16]] = add <2 x i64> [[TMP14]], [[WIDE_LOAD32]]
-; CHECK-NEXT: [[TMP17]] = add <2 x i64> [[TMP15]], [[WIDE_LOAD33]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[TMP17]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = tail call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
-; CHECK-NEXT: br label [[LOOP_PREHEADER34]]
-; CHECK: loop.preheader34:
-; CHECK-NEXT: [[IV_PH:%.*]] = phi i64 [ 1, [[LOOP_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[SUM_PH:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[LOOP_PREHEADER]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT22:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER34]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT22]] ], [ [[SUM_PH]], [[LOOP_PREHEADER34]] ]
-; CHECK-NEXT: [[INRANGE_I:%.*]] = icmp ult i64 [[SUB_I]], [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT22:%.*]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT22]] ]
+; CHECK-NEXT: [[INRANGE_I:%.*]] = icmp ugt i64 [[IV]], [[SUB_I]]
; CHECK-NEXT: br i1 [[INRANGE_I]], label [[ERROR_I:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT:%.*]]
; CHECK: error.i:
; CHECK-NEXT: tail call void @error()
@@ -210,31 +103,30 @@ define i64 @sum_3_at_with_int_conversion(ptr %A, ptr %B, ptr %C, i64 %N) {
; CHECK: at_with_int_conversion.exit:
; CHECK-NEXT: [[GEP_IDX_I:%.*]] = getelementptr i64, ptr [[START_I]], i64 [[IV]]
; CHECK-NEXT: [[LV_I:%.*]] = load i64, ptr [[GEP_IDX_I]], align 8
-; CHECK-NEXT: [[INRANGE_I7:%.*]] = icmp ult i64 [[SUB_I6_PEEL]], [[IV]]
+; CHECK-NEXT: [[INRANGE_I7:%.*]] = icmp ugt i64 [[IV]], [[SUB_I6]]
; CHECK-NEXT: br i1 [[INRANGE_I7]], label [[ERROR_I10:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT11:%.*]]
; CHECK: error.i10:
; CHECK-NEXT: tail call void @error()
; CHECK-NEXT: unreachable
; CHECK: at_with_int_conversion.exit11:
-; CHECK-NEXT: [[INRANGE_I18:%.*]] = icmp ult i64 [[SUB_I17_PEEL]], [[IV]]
+; CHECK-NEXT: [[INRANGE_I18:%.*]] = icmp ugt i64 [[IV]], [[SUB_I17]]
; CHECK-NEXT: br i1 [[INRANGE_I18]], label [[ERROR_I21:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT22]]
; CHECK: error.i21:
; CHECK-NEXT: tail call void @error()
; CHECK-NEXT: unreachable
; CHECK: at_with_int_conversion.exit22:
-; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1_PEEL]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I8:%.*]] = getelementptr i64, ptr [[START_I1]], i64 [[IV]]
; CHECK-NEXT: [[LV_I9:%.*]] = load i64, ptr [[GEP_IDX_I8]], align 8
-; CHECK-NEXT: [[GEP_IDX_I19:%.*]] = getelementptr i64, ptr [[START_I12_PEEL]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_IDX_I19:%.*]] = getelementptr i64, ptr [[START_I12]], i64 [[IV]]
; CHECK-NEXT: [[LV_I20:%.*]] = load i64, ptr [[GEP_IDX_I19]], align 8
; CHECK-NEXT: [[ADD_1:%.*]] = add i64 [[LV_I]], [[SUM]]
; CHECK-NEXT: [[ADD_2:%.*]] = add i64 [[ADD_1]], [[LV_I9]]
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[ADD_2]], [[LV_I20]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[SMAX]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[AT_WITH_INT_CONVERSION_EXIT22_PEEL:%.*]] ], [ [[SUM_NEXT]], [[AT_WITH_INT_CONVERSION_EXIT22]] ]
-; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
+; CHECK-NEXT: ret i64 [[SUM_NEXT]]
;
entry:
br label %loop
@@ -265,7 +157,7 @@ define i64 @at_with_int_conversion(ptr %ptr, i64 %idx) {
; CHECK-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
; CHECK-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[END_INT]], [[START_INT]]
-; CHECK-NEXT: [[INRANGE:%.*]] = icmp ult i64 [[SUB]], [[IDX:%.*]]
+; CHECK-NEXT: [[INRANGE:%.*]] = icmp ugt i64 [[IDX:%.*]], [[SUB]]
; CHECK-NEXT: br i1 [[INRANGE]], label [[ERROR:%.*]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: [[GEP_IDX:%.*]] = getelementptr i64, ptr [[START]], i64 [[IDX]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index c133852f66937..b53d0c211919b 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -13,11 +13,11 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DCT]], i64 32
; CHECK-NEXT: [[SCEVGEP23:%.*]] = getelementptr i8, ptr [[BIAS]], i64 32
; CHECK-NEXT: [[SCEVGEP24:%.*]] = getelementptr i8, ptr [[MF]], i64 32
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP23]], [[DCT]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[BIAS]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DCT]], [[SCEVGEP23]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[BIAS]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND025:%.*]] = icmp ugt ptr [[SCEVGEP24]], [[DCT]]
-; CHECK-NEXT: [[BOUND126:%.*]] = icmp ugt ptr [[SCEVGEP]], [[MF]]
+; CHECK-NEXT: [[BOUND025:%.*]] = icmp ult ptr [[DCT]], [[SCEVGEP24]]
+; CHECK-NEXT: [[BOUND126:%.*]] = icmp ult ptr [[MF]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT27:%.*]] = and i1 [[BOUND025]], [[BOUND126]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT27]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index c6126727598ef..6e9abb3813aa1 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -15,7 +15,7 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK-NEXT: br i1 [[CMP_NOT2]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK: while.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[BLOCKSIZE]], 8
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[WHILE_BODY_PREHEADER16:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[WHILE_BODY_PREHEADER18:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[BLOCKSIZE]], -8
; CHECK-NEXT: [[IND_END:%.*]] = and i32 [[BLOCKSIZE]], 7
@@ -28,27 +28,27 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP13:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[TMP5]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX13:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX13]]
+; CHECK-NEXT: [[OFFSET_IDX15:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[OFFSET_IDX15]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
-; CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <8 x i16>, ptr [[NEXT_GEP14]], align 2
-; CHECK-NEXT: [[TMP7:%.*]] = sext <8 x i16> [[WIDE_LOAD15]] to <8 x i32>
-; CHECK-NEXT: [[TMP8:%.*]] = mul nsw <8 x i32> [[TMP7]], [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = ashr <8 x i32> [[TMP8]], <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-; CHECK-NEXT: [[TMP10:%.*]] = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> [[TMP9]], <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
-; CHECK-NEXT: [[TMP11:%.*]] = trunc <8 x i32> [[TMP10]] to <8 x i16>
-; CHECK-NEXT: store <8 x i16> [[TMP11]], ptr [[NEXT_GEP13]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
+; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <8 x i16>, ptr [[NEXT_GEP16]], align 2
+; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD17]] to <8 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <8 x i32> [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = ashr <8 x i32> [[TMP5]], <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+; CHECK-NEXT: [[TMP7:%.*]] = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> [[TMP6]], <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i16>
+; CHECK-NEXT: store <8 x i16> [[TMP8]], ptr [[NEXT_GEP14]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER16]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER18]]
; CHECK: while.body.preheader18:
; CHECK-NEXT: [[BLKCNT_06_PH:%.*]] = phi i32 [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[PSRCA_ADDR_05_PH:%.*]] = phi ptr [ [[PSRCA]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END7]], [[MIDDLE_BLOCK]] ]
@@ -56,16 +56,16 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK-NEXT: [[PSRCB_ADDR_03_PH:%.*]] = phi ptr [ [[PSRCB]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[BLKCNT_06:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BLKCNT_06_PH]], [[WHILE_BODY_PREHEADER16]] ]
-; CHECK-NEXT: [[PSRCA_ADDR_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[WHILE_BODY]] ], [ [[PSRCA_ADDR_05_PH]], [[WHILE_BODY_PREHEADER16]] ]
-; CHECK-NEXT: [[PDST_ADDR_04:%.*]] = phi ptr [ [[INCDEC_PTR4:%.*]], [[WHILE_BODY]] ], [ [[PDST_ADDR_04_PH]], [[WHILE_BODY_PREHEADER16]] ]
-; CHECK-NEXT: [[PSRCB_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[WHILE_BODY]] ], [ [[PSRCB_ADDR_03_PH]], [[WHILE_BODY_PREHEADER16]] ]
+; CHECK-NEXT: [[BLKCNT_06:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BLKCNT_06_PH]], [[WHILE_BODY_PREHEADER18]] ]
+; CHECK-NEXT: [[PSRCA_ADDR_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[WHILE_BODY]] ], [ [[PSRCA_ADDR_05_PH]], [[WHILE_BODY_PREHEADER18]] ]
+; CHECK-NEXT: [[PDST_ADDR_04:%.*]] = phi ptr [ [[INCDEC_PTR4:%.*]], [[WHILE_BODY]] ], [ [[PDST_ADDR_04_PH]], [[WHILE_BODY_PREHEADER18]] ]
+; CHECK-NEXT: [[PSRCB_ADDR_03:%.*]] = phi ptr [ [[INCDEC_PTR1:%.*]], [[WHILE_BODY]] ], [ [[PSRCB_ADDR_03_PH]], [[WHILE_BODY_PREHEADER18]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRCA_ADDR_05]], i32 2
-; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[PSRCA_ADDR_05]], align 2
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
+; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[PSRCA_ADDR_05]], align 2
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[PSRCB_ADDR_03]], i32 2
-; CHECK-NEXT: [[TMP14:%.*]] = load i16, ptr [[PSRCB_ADDR_03]], align 2
-; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP14]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[PSRCB_ADDR_03]], align 2
+; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP11]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[MUL]], 15
; CHECK-NEXT: [[SPEC_SELECT_I:%.*]] = tail call i32 @llvm.smin.i32(i32 [[SHR]], i32 32767)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
index f0a1846389580..af68a34587450 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
@@ -46,7 +46,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DATA]], align 8
; O2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUMELEMS]], 8
; O2-NEXT: [[N_VEC:%.*]] = and i64 [[NUMELEMS]], -8
-; O2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEMS]]
+; O2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEMS]], [[N_VEC]]
; O2-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
; O2: for.cond1.preheader:
; O2-NEXT: [[I_06:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC7:%.*]], [[FOR_COND_CLEANUP3:%.*]] ]
@@ -96,7 +96,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
; O3: for.cond1.preheader.us.preheader:
; O3-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUMELEMS]], 8
; O3-NEXT: [[N_VEC:%.*]] = and i64 [[NUMELEMS]], -8
-; O3-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEMS]]
+; O3-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEMS]], [[N_VEC]]
; O3-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
; O3: for.cond1.preheader.us:
; O3-NEXT: [[I_06_US:%.*]] = phi i64 [ [[INC7_US:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
index c5deb716d8030..5bf7be4362a8e 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
@@ -47,7 +47,7 @@ define void @licm(ptr align 8 dereferenceable(8) %_M_start.i, i64 %numElem) {
; O23-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; O23-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; O23: middle.block:
-; O23-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEM]]
+; O23-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEM]], [[N_VEC]]
; O23-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_PREHEADER]]
; O23: for.body.preheader:
; O23-NEXT: [[K_02_PH:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
index 0127f05022d71..ec217a9cd31c6 100644
--- a/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
+++ b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
@@ -139,7 +139,7 @@ define float @test15_reassoc_nsz(float %b, float %a) {
define float @test15_reassoc(float %b, float %a) {
; CHECK-LABEL: @test15_reassoc(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret float [[TMP4]]
diff --git a/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll b/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
index 7e958e8906c9a..13aeb9e64fc3f 100644
--- a/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
@@ -8,7 +8,7 @@ define i4 @not_reassociate_and_and_not(i4 %a, i4 %b, i4 %c, i4 %d) {
; CHECK-LABEL: @not_reassociate_and_and_not(
; CHECK-NEXT: [[TMP1:%.*]] = or i4 [[B:%.*]], [[C:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i4 [[TMP1]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i4 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT: [[AND2:%.*]] = and i4 [[A:%.*]], [[TMP2]]
; CHECK-NEXT: [[AND3:%.*]] = and i4 [[AND2]], [[D:%.*]]
; CHECK-NEXT: ret i4 [[AND3]]
;
@@ -25,7 +25,7 @@ define i32 @not_reassociate_or_or_not(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: @not_reassociate_or_or_not(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B:%.*]], [[C:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT: [[B2:%.*]] = or i32 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT: [[B2:%.*]] = or i32 [[A:%.*]], [[TMP2]]
; CHECK-NEXT: [[B3:%.*]] = or i32 [[B2]], [[D:%.*]]
; CHECK-NEXT: ret i32 [[B3]]
;
diff --git a/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll b/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
index 89095048f2249..2933249782f44 100644
--- a/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
+++ b/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
@@ -10,7 +10,7 @@ define void @test_remove_check_with_incrementing_integer_induction(i16 %start, i
; CHECK-LABEL: @test_remove_check_with_incrementing_integer_induction(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LEN:%.*]] = zext i8 [[LEN_N:%.*]] to i16
-; CHECK-NEXT: [[LEN_NEG_NOT:%.*]] = icmp ult i16 [[LEN]], [[A:%.*]]
+; CHECK-NEXT: [[LEN_NEG_NOT:%.*]] = icmp ugt i16 [[A:%.*]], [[LEN]]
; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 [[LEN_N]], 0
; CHECK-NEXT: [[OR_COND3:%.*]] = and i1 [[LEN_NEG_NOT]], [[C1]]
; CHECK-NEXT: br i1 [[OR_COND3]], label [[LOOP_LATCH_PREHEADER:%.*]], label [[EXIT:%.*]]
diff --git a/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll b/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
index 6dc7b89a9b186..d629ce15c1c92 100644
--- a/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
+++ b/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
@@ -6,14 +6,14 @@ define float @test1(float %a0, float %a1, float %a2, float %a3, float %a4) {
; CHECK-LABEL: define float @test1(
; CHECK-SAME: float [[A0:%.*]], float [[A1:%.*]], float [[A2:%.*]], float [[A3:%.*]], float [[A4:%.*]]) {
; CHECK-NEXT: [[TMP_2:%.*]] = fadd float [[A3]], [[A4]]
-; CHECK-NEXT: [[TMP_4:%.*]] = fadd float [[TMP_2]], [[A2]]
-; CHECK-NEXT: [[TMP_6:%.*]] = fadd float [[TMP_4]], [[A1]]
-; CHECK-NEXT: [[TMP_8:%.*]] = fadd float [[TMP_6]], [[A0]]
+; CHECK-NEXT: [[TMP_4:%.*]] = fadd float [[A2]], [[TMP_2]]
+; CHECK-NEXT: [[TMP_6:%.*]] = fadd float [[A1]], [[TMP_4]]
+; CHECK-NEXT: [[TMP_8:%.*]] = fadd float [[A0]], [[TMP_6]]
; CHECK-NEXT: [[TMP_11:%.*]] = fadd float [[A2]], [[A3]]
-; CHECK-NEXT: [[TMP_13:%.*]] = fadd float [[TMP_11]], [[A1]]
-; CHECK-NEXT: [[TMP_15:%.*]] = fadd float [[TMP_13]], [[A0]]
+; CHECK-NEXT: [[TMP_13:%.*]] = fadd float [[A1]], [[TMP_11]]
+; CHECK-NEXT: [[TMP_15:%.*]] = fadd float [[A0]], [[TMP_13]]
; CHECK-NEXT: [[TMP_18:%.*]] = fadd float [[A1]], [[A2]]
-; CHECK-NEXT: [[TMP_20:%.*]] = fadd float [[TMP_18]], [[A0]]
+; CHECK-NEXT: [[TMP_20:%.*]] = fadd float [[A0]], [[TMP_18]]
; CHECK-NEXT: [[TMP_23:%.*]] = fadd float [[A0]], [[A1]]
; CHECK-NEXT: [[TMP_26:%.*]] = fsub float [[TMP_8]], [[TMP_15]]
; CHECK-NEXT: [[TMP_28:%.*]] = fadd float [[TMP_20]], [[TMP_26]]
diff --git a/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll b/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
index 5152201ea7c93..2d6f67bbaff6a 100644
--- a/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
+++ b/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
@@ -33,8 +33,8 @@ define float @test2(float %A, float %B) {
; Both 'reassoc' and 'nsz' are required.
define float @test2_minimal(float %A, float %B) {
; CHECK-LABEL: @test2_minimal(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: ret float [[TMP1]]
+; CHECK-NEXT: [[Z:%.*]] = fsub reassoc nsz float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret float [[Z]]
;
%W = fadd reassoc nsz float %B, 5.000000e+00
%X = fadd reassoc nsz float %A, -7.000000e+00
@@ -81,7 +81,7 @@ define float @test3(float %A, float %B, float %C, float %D) {
define float @test4(float %A, float %B, float %C, float %D) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[C:%.*]], [[TMP1]]
; CHECK-NEXT: [[Q:%.*]] = fsub fast float [[D:%.*]], [[TMP2]]
; CHECK-NEXT: ret float [[Q]]
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
index 997b8ac8add32..fd5f09bf2adc0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
@@ -8,9 +8,9 @@
define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_eq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -38,9 +38,9 @@ define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_ne_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -68,9 +68,9 @@ define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_oeq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -98,9 +98,9 @@ define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -132,9 +132,9 @@ define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_sgt_slt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp slt <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -162,9 +162,9 @@ define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_uge_ule_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -192,9 +192,9 @@ define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_ogt_olt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -222,11 +222,11 @@ define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @fcmp_ord_uno_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_ord_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp ord <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ord <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
index 29cf66a1ea656..35619d6d3ad1d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
@@ -8,9 +8,9 @@
define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_eq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -38,9 +38,9 @@ define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_ne_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -68,9 +68,9 @@ define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_oeq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -98,9 +98,9 @@ define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -132,9 +132,9 @@ define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_sgt_slt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp slt <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -162,9 +162,9 @@ define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
; CHECK-LABEL: @icmp_uge_ule_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x i32> %a, i32 0
@@ -192,9 +192,9 @@ define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_ogt_olt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
@@ -222,11 +222,11 @@ define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
define <4 x i32> @fcmp_ord_uno_v4i32(<4 x float> %a, ptr %b) {
; CHECK-LABEL: @fcmp_ord_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp ord <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ord <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[R]]
;
%a0 = extractelement <4 x float> %a, i32 0
More information about the llvm-commits
mailing list