[llvm] [InstCombine] Remove some of the complexity-based canonicalization (PR #91185)

via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 20 07:29:26 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-amdgpu

Author: Nikita Popov (nikic)

<details>
<summary>Changes</summary>

The idea behind this is that the canonicalization allows us to handle fewer patterns, because we know that some will be canonicalized away. This is indeed very useful, e.g. for knowing that constants are always on the right.

However, this is only useful if the canonicalization is actually reliable. This is the case for constants, but not for arguments: Moving these to the right makes it look like the "more complex" expression is guaranteed to be on the left, but this is not actually the case in practice. It fails as soon as you replace the argument with another instruction.

The end result is that it looks like things correctly work in tests, while they actually don't. We use the "thwart complexity-based canonicalization" trick to handle this in tests, but it's often a challenge for new contributors to get this right, and based on the regressions this PR originally exposed, we clearly don't get this right in many cases.

For this reason, I think that it's better to remove this complexity canonicalization. It will make it much easier to write tests for commuted cases and make sure that they are handled.

---

Patch is 1.03 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/91185.diff


268 Files Affected:

- (modified) llvm/include/llvm/Transforms/InstCombine/InstCombiner.h (+11-14) 
- (modified) llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll (+11-11) 
- (modified) llvm/test/Analysis/ValueTracking/known-power-of-two.ll (+30-30) 
- (modified) llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll (+8-8) 
- (modified) llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll (+8-8) 
- (modified) llvm/test/Analysis/ValueTracking/phi-known-bits.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow-codegen.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow.ll (+23-23) 
- (modified) llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll (+9-9) 
- (modified) llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-powr.ll (+4-4) 
- (modified) llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/abs-1.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/add-mask-neg.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/add.ll (+22-22) 
- (modified) llvm/test/Transforms/InstCombine/add2.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/add_or_sub.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/and-or-icmps.ll (+55-55) 
- (modified) llvm/test/Transforms/InstCombine/and-or-not.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/and-or.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/and-xor-merge.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/and-xor-or.ll (+111-111) 
- (modified) llvm/test/Transforms/InstCombine/and.ll (+27-27) 
- (modified) llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/apint-or.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/apint-shift.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/apint-sub.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/ashr-lshr.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/assume-align.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/assume-separate_storage.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/avg-lsb.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/binop-and-shifts.ll (+19-19) 
- (modified) llvm/test/Transforms/InstCombine/binop-cast.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/bit-checks.ll (+31-27) 
- (modified) llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/bitcast.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/bitreverse.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/bswap-fold.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/call-guard.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/cast-mul-select.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/cast.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/cast_phi.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/cast_ptr.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/conditional-negation.ll (+9-9) 
- (modified) llvm/test/Transforms/InstCombine/ctpop-cttz.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/ctpop-pow2.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/cttz.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/demorgan.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/dependent-ivs.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll (+19-19) 
- (modified) llvm/test/Transforms/InstCombine/fadd.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/fast-basictest.ll (+20-15) 
- (modified) llvm/test/Transforms/InstCombine/fast-math.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/fcmp.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/fdiv-sqrt.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/fdiv.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/float-shrink-compare.ll (+14-14) 
- (modified) llvm/test/Transforms/InstCombine/fmul.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/fold-ext-eq-c-with-op.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll (+16-16) 
- (modified) llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/fpextend.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/fptrunc.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/free-inversion.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/fsh.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/fsub.ll (+23-23) 
- (modified) llvm/test/Transforms/InstCombine/funnel.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/getelementptr.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-add.ll (+33-33) 
- (modified) llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll (+22-22) 
- (modified) llvm/test/Transforms/InstCombine/icmp-and-shift.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/icmp-custom-dl.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/icmp-equality-xor.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-ext-ext.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/icmp-gep.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/icmp-mul-zext.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/icmp-mul.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/icmp-of-and-x.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/icmp-of-or-x.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll (+23-23) 
- (modified) llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll (+21-21) 
- (modified) llvm/test/Transforms/InstCombine/icmp-or-of-select-with-zero.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-or.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-range.ll (+33-33) 
- (modified) llvm/test/Transforms/InstCombine/icmp-rotate.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll (+20-20) 
- (modified) llvm/test/Transforms/InstCombine/icmp-select.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/icmp-sub.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/icmp.ll (+28-28) 
- (modified) llvm/test/Transforms/InstCombine/implies.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/ispow2.ll (+7-7) 
- (modified) llvm/test/Transforms/InstCombine/known-bits.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/known-never-nan.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/ldexp-ext.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/log-pow.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll (+11-11) 
- (modified) llvm/test/Transforms/InstCombine/logical-select.ll (+25-25) 
- (modified) llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/lshr.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/masked-merge-add.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll (+21-21) 
- (modified) llvm/test/Transforms/InstCombine/masked-merge-or.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/masked-merge-xor.ll (+20-20) 
- (modified) llvm/test/Transforms/InstCombine/minmax-fold.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/mul-masked-bits.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/mul-pow2.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/mul.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/mul_fold.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/mul_full_64.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/not-add.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/not.ll (+24-24) 
- (modified) llvm/test/Transforms/InstCombine/onehot_merge.ll (+24-24) 
- (modified) llvm/test/Transforms/InstCombine/or-xor-xor.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/or-xor.ll (+19-19) 
- (modified) llvm/test/Transforms/InstCombine/or.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/phi.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/pr44242.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/pr49688.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/pr75369.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll (+7-7) 
- (modified) llvm/test/Transforms/InstCombine/ptrmask.ll (+11-11) 
- (modified) llvm/test/Transforms/InstCombine/range-check.ll (+22-22) 
- (modified) llvm/test/Transforms/InstCombine/reassociate-nuw.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/rem.ll (+11-11) 
- (modified) llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll (+18-18) 
- (modified) llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll (+16-16) 
- (modified) llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll (+28-28) 
- (modified) llvm/test/Transforms/InstCombine/saturating-add-sub.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/scalarization.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/select-and-or.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/select-binop-cmp.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/select-cmp-eq-op-fold.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/select-cmp.ll (+29-29) 
- (modified) llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/select-divrem.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/select-factorize.ll (+12-12) 
- (modified) llvm/test/Transforms/InstCombine/select-masked_gather.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/select-masked_load.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/select-of-bittest.ll (+9-9) 
- (modified) llvm/test/Transforms/InstCombine/select-safe-transforms.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll (+43-43) 
- (modified) llvm/test/Transforms/InstCombine/select.ll (+19-19) 
- (modified) llvm/test/Transforms/InstCombine/select_meta.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/set.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/shift-add.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/shift-logic.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/shift.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/shl-bo.ll (+16-16) 
- (modified) llvm/test/Transforms/InstCombine/shuffle-binop.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/signed-truncation-check.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/sink-not-into-and.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/sink-not-into-or.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/smax-icmp.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/smin-icmp.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/sub-gep.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/sub-minmax.ll (+5-5) 
- (modified) llvm/test/Transforms/InstCombine/sub-not.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/sub-of-negatible.ll (+9-9) 
- (modified) llvm/test/Transforms/InstCombine/sub-xor-cmp.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/sub.ll (+13-13) 
- (modified) llvm/test/Transforms/InstCombine/trunc-binop-ext.ll (+20-20) 
- (modified) llvm/test/Transforms/InstCombine/uaddo.ll (+10-10) 
- (modified) llvm/test/Transforms/InstCombine/umax-icmp.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/umin-icmp.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll (+11-11) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll (+11-11) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll (+6-6) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/vec_demanded_elts.ll (+3-3) 
- (modified) llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll (+7-7) 
- (modified) llvm/test/Transforms/InstCombine/vec_shuffle.ll (+7-7) 
- (modified) llvm/test/Transforms/InstCombine/vector-reverse.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/vector-xor.ll (+4-4) 
- (modified) llvm/test/Transforms/InstCombine/widenable-conditions.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/xor.ll (+17-17) 
- (modified) llvm/test/Transforms/InstCombine/xor2.ll (+16-16) 
- (modified) llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll (+8-8) 
- (modified) llvm/test/Transforms/InstCombine/zext-or-icmp.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/zext.ll (+4-4) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll (+9-9) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll (+15-15) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll (+68-68) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll (+13-13) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll (+5-5) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll (+37-37) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll (+7-7) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll (+32-32) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll (+132-132) 
- (modified) llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll (+18-18) 
- (modified) llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll (+2-2) 
- (modified) llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll (+10-10) 
- (modified) llvm/test/Transforms/LoopVectorize/X86/pr23997.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll (+2-2) 
- (modified) llvm/test/Transforms/LoopVectorize/float-induction.ll (+30-30) 
- (modified) llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll (+3-3) 
- (modified) llvm/test/Transforms/LoopVectorize/induction.ll (+57-57) 
- (modified) llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll (+7-7) 
- (modified) llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll (+14-14) 
- (modified) llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll (+9-9) 
- (modified) llvm/test/Transforms/LoopVectorize/reduction-inloop.ll (+13-13) 
- (modified) llvm/test/Transforms/LoopVectorize/reduction.ll (+11-11) 
- (modified) llvm/test/Transforms/LoopVectorize/runtime-check.ll (+4-4) 
- (modified) llvm/test/Transforms/LoopVectorize/scalable-inductions.ll (+4-4) 
- (modified) llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll (+2-2) 
- (modified) llvm/test/Transforms/PGOProfile/chr.ll (+7-7) 
- (modified) llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll (+4-4) 
- (modified) llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll (+7-7) 
- (modified) llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll (+8-8) 
- (modified) llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll (+1-1) 
- (modified) llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll (+4-4) 
- (modified) llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll (+1-1) 
- (modified) llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll (+2-2) 
- (modified) llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll (+1-1) 
- (modified) llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-logical.ll (+2-2) 


``````````diff
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index ebcbd5d9e88800..ed2e7f58ca853c 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -132,21 +132,18 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
   /// This routine maps IR values to various complexity ranks:
   ///   0 -> undef
   ///   1 -> Constants
-  ///   2 -> Other non-instructions
-  ///   3 -> Arguments
-  ///   4 -> Cast and (f)neg/not instructions
-  ///   5 -> Other instructions
+  ///   2 -> Cast and (f)neg/not instructions
+  ///   3 -> Other instructions and arguments
   static unsigned getComplexity(Value *V) {
-    if (isa<Instruction>(V)) {
-      if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
-          match(V, m_Not(PatternMatch::m_Value())) ||
-          match(V, m_FNeg(PatternMatch::m_Value())))
-        return 4;
-      return 5;
-    }
-    if (isa<Argument>(V))
-      return 3;
-    return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
+    if (isa<Constant>(V))
+      return isa<UndefValue>(V) ? 0 : 1;
+
+    if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
+        match(V, m_Not(PatternMatch::m_Value())) ||
+        match(V, m_FNeg(PatternMatch::m_Value())))
+      return 2;
+
+    return 3;
   }
 
   /// Predicate canonicalization reduces the number of patterns that need to be
diff --git a/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll b/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll
index ba3a484441e9e3..55c3e7779478ef 100644
--- a/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll
+++ b/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll
@@ -19,7 +19,7 @@ define i64 @known_power_of_two_urem_phi(i64 %size, i1 %cmp, i1 %cmp1) {
 ; CHECK-NEXT:    br label [[COND_END]]
 ; CHECK:       cond.end:
 ; CHECK-NEXT:    [[PHI1:%.*]] = phi i64 [ 4095, [[ENTRY:%.*]] ], [ [[PHI]], [[COND_TRUE_END]] ]
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[PHI1]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[PHI1]]
 ; CHECK-NEXT:    ret i64 [[UREM]]
 ;
 entry:
@@ -57,7 +57,7 @@ define i64 @known_power_of_two_urem_nested_expr(i64 %size, i1 %cmp, i1 %cmp1, i6
 ; CHECK:       cond.end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[SELECT]], [[COND_FALSE]] ], [ [[TMP1]], [[COND_TRUE]] ], [ [[PHI]], [[COND_END]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[PHI]], -1
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[TMP2]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP2]]
 ; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[UREM]], 10
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[COND_END]], label [[END:%.*]]
 ; CHECK:       end:
@@ -119,7 +119,7 @@ define i64 @known_power_of_two_urem_loop_mul(i64 %size, i64 %a) {
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[PHI]], -1
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]]
 ; CHECK-NEXT:    [[ADD]] = add nuw i64 [[SUM]], [[UREM]]
 ; CHECK-NEXT:    [[I]] = shl nuw i64 [[PHI]], 2
 ; CHECK-NEXT:    [[ICMP:%.*]] = icmp ult i64 [[PHI]], 25000000
@@ -190,7 +190,7 @@ define i64 @known_power_of_two_urem_loop_shl(i64 %size, i64 %a) {
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[PHI]], -1
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]]
 ; CHECK-NEXT:    [[ADD]] = add nuw i64 [[SUM]], [[UREM]]
 ; CHECK-NEXT:    [[I]] = shl nuw i64 [[PHI]], 1
 ; CHECK-NEXT:    [[ICMP:%.*]] = icmp ult i64 [[PHI]], 50000000
@@ -225,7 +225,7 @@ define i64 @known_power_of_two_urem_loop_lshr(i64 %size, i64 %a) {
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[PHI]], -1
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]]
 ; CHECK-NEXT:    [[ADD]] = add nuw i64 [[SUM]], [[UREM]]
 ; CHECK-NEXT:    [[I]] = lshr i64 [[PHI]], 1
 ; CHECK-NEXT:    [[ICMP_NOT:%.*]] = icmp ult i64 [[PHI]], 2
@@ -260,7 +260,7 @@ define i64 @known_power_of_two_urem_loop_ashr(i64 %size, i64 %a) {
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i64 [ 4096, [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = add nsw i64 [[PHI]], -1
-; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]]
+; CHECK-NEXT:    [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]]
 ; CHECK-NEXT:    [[ADD]] = add nsw i64 [[SUM]], [[UREM]]
 ; CHECK-NEXT:    [[I]] = lshr i64 [[PHI]], [[A:%.*]]
 ; CHECK-NEXT:    [[ICMP_NOT:%.*]] = icmp eq i64 [[I]], 0
@@ -396,7 +396,7 @@ define i8 @known_power_of_two_rust_next_power_of_two(i8 %x, i8 %y) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr i8 -1, [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i8 [[X]], 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i8 [[TMP3]], i8 0
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[TMP5]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y:%.*]], [[TMP5]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %2 = add i8 %x, -1
@@ -414,7 +414,7 @@ define i8 @known_power_of_two_rust_next_power_of_two(i8 %x, i8 %y) {
 define i8 @known_power_of_two_lshr_add_one_allow_zero(i8 %x, i8 %y) {
 ; CHECK-LABEL: @known_power_of_two_lshr_add_one_allow_zero(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %4 = lshr i8 -1, %x
@@ -429,7 +429,7 @@ define i1 @known_power_of_two_lshr_add_one_nuw_deny_zero(i8 %x, i8 %y) {
 ; CHECK-LABEL: @known_power_of_two_lshr_add_one_nuw_deny_zero(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 -2, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP3]], -1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -446,7 +446,7 @@ define i1 @negative_known_power_of_two_lshr_add_one_deny_zero(i8 %x, i8 %y) {
 ; CHECK-LABEL: @negative_known_power_of_two_lshr_add_one_deny_zero(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 -2, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP3]], -1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -463,7 +463,7 @@ define i1 @negative_known_power_of_two_lshr_add_one_nsw_deny_zero(i8 %x, i8 %y)
 ; CHECK-LABEL: @negative_known_power_of_two_lshr_add_one_nsw_deny_zero(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = sub i8 -2, [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP3]], -1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
diff --git a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
index 7bcf96065a69d9..7cfb6af0d7b95d 100644
--- a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
+++ b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
@@ -16,8 +16,8 @@ declare i16 @llvm.umax.i16(i16, i16)
 define i32 @pr25900(i32 %d) {
 ; CHECK-LABEL: define i32 @pr25900
 ; CHECK-SAME: (i32 [[D:%.*]]) {
-; CHECK-NEXT:    [[AND:%.*]] = ashr i32 [[D]], 31
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 4, [[AND]]
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[D]], 31
+; CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 4, [[ASHR]]
 ; CHECK-NEXT:    ret i32 [[DIV]]
 ;
   %and = and i32 %d, -2147483648
@@ -37,7 +37,7 @@ define i8 @trunc_is_pow2_or_zero(i16 %x, i8 %y) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = trunc i16 [[XP2]] to i8
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i8 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -67,7 +67,7 @@ define i1 @trunc_is_pow2_fail(i16 %x, i8 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i8 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = trunc i16 [[XP2]] to i8
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -85,7 +85,7 @@ define i16 @bswap_is_pow2_or_zero(i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i16 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -115,7 +115,7 @@ define i1 @bswap_is_pow2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl nuw i16 1, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -132,7 +132,7 @@ define i1 @bswap_is_pow2_fail(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 2, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -150,7 +150,7 @@ define i16 @bitreverse_is_pow2_or_zero(i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i16 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i16 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -180,7 +180,7 @@ define i1 @bitreverse_is_pow2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl nuw i16 1, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -197,7 +197,7 @@ define i1 @bitreverse_is_pow2_fail(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 2, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -215,7 +215,7 @@ define i16 @fshl_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i16 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -262,7 +262,7 @@ define i1 @fshl_is_pow2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl nuw i16 1, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -279,7 +279,7 @@ define i1 @fshl_is_pow2_fail(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 2, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -297,7 +297,7 @@ define i16 @fshr_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i16 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -344,7 +344,7 @@ define i1 @fshr_is_pow2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl nuw i16 1, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -361,7 +361,7 @@ define i1 @fshr_is_pow2_fail(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl i16 2, [[X]]
 ; CHECK-NEXT:    [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]])
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -380,7 +380,7 @@ define i16 @mul_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[ZP2:%.*]] = shl i16 2, [[Z]]
 ; CHECK-NEXT:    [[XX:%.*]] = mul i16 [[XP2]], [[ZP2]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[XX]], -1
-; CHECK-NEXT:    [[R:%.*]] = and i16 [[TMP1]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Y]], [[TMP1]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xp2 = shl i16 4, %x
@@ -416,7 +416,7 @@ define i1 @mul_is_pow2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2
 ; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 [[ZP2]], [[TMP1]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -439,7 +439,7 @@ define i1 @mul_is_pow2_fail(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2
 ; CHECK-NEXT:    [[XX:%.*]] = shl i16 [[ZP2]], [[TMP1]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -462,7 +462,7 @@ define i1 @mul_is_pow2_fail2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-NEXT:    [[XP2:%.*]] = shl nuw nsw i16 3, [[XSMALL]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[ZSMALL]], 1
 ; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 [[XP2]], [[TMP1]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -482,7 +482,7 @@ define i1 @shl_is_pow2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 4, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -499,7 +499,7 @@ define i1 @shl_is_pow2_fail(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = shl i16 512, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -516,7 +516,7 @@ define i1 @shl_is_pow2_fail2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 5, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -533,7 +533,7 @@ define i1 @lshr_is_pow2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = lshr exact i16 512, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -550,7 +550,7 @@ define i1 @lshr_is_pow2_fail(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = lshr i16 4, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -567,7 +567,7 @@ define i1 @lshr_is_pow2_fail2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[XX:%.*]] = lshr i16 513, [[XSMALL]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -584,7 +584,7 @@ define i1 @and_is_pow2(i16 %x, i16 %y) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XNZ:%.*]] = or i16 [[X]], 4
 ; CHECK-NEXT:    [[X_NEG:%.*]] = sub nsw i16 0, [[XNZ]]
-; CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[X_NEG]], [[Y]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[Y]], [[X_NEG]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[TMP1]], [[XNZ]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -602,8 +602,8 @@ define i1 @and_is_pow2_fail(i16 %x, i16 %y) {
 ; CHECK-LABEL: define i1 @and_is_pow2_fail
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[X_NEG:%.*]] = sub i16 0, [[X]]
-; CHECK-NEXT:    [[XX:%.*]] = and i16 [[X_NEG]], [[X]]
-; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[XX:%.*]] = and i16 [[X]], [[X_NEG]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -619,7 +619,7 @@ define i16 @i1_is_pow2_or_zero(i1 %x, i16 %y) {
 ; CHECK-LABEL: define i16 @i1_is_pow2_or_zero
 ; CHECK-SAME: (i1 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XX:%.*]] = zext i1 [[X]] to i16
-; CHECK-NEXT:    [[R:%.*]] = or i16 [[XX]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = or i16 [[Y]], [[XX]]
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %xx = zext i1 %x to i16
diff --git a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll b/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll
index 4ca7ed9eda7bbc..fba907ab731b0b 100644
--- a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll
+++ b/llvm/test/Analysis/ValueTracking...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/91185


More information about the llvm-commits mailing list