[llvm] eecb99c - [Tests] Add disjoint flag to some tests (NFC)

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 5 05:10:17 PST 2023


Author: Nikita Popov
Date: 2023-12-05T14:09:36+01:00
New Revision: eecb99c5f66c8491766628a2925587e20f3b1dbd

URL: https://github.com/llvm/llvm-project/commit/eecb99c5f66c8491766628a2925587e20f3b1dbd
DIFF: https://github.com/llvm/llvm-project/commit/eecb99c5f66c8491766628a2925587e20f3b1dbd.diff

LOG: [Tests] Add disjoint flag to some tests (NFC)

These tests rely on SCEV recognizing an "or" with no common
bits as an "add". Add the disjoint flag to the relevant "or"
instructions in preparation for switching SCEV to use the flag
instead of the ValueTracking query. The IR with the disjoint flag
matches what InstCombine would produce.
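
For context, a minimal sketch (hypothetical values, not taken from any
test below) of the property the flag encodes: when the operands of an
"or" share no set bits, no carry can occur, so the "or" computes the
same value as an "add". Here %i is a multiple of 2, so its low bit is
known zero:

  %i = shl nuw nsw i64 %n, 1   ; %i = 2 * %n, so bit 0 of %i is zero
  %j = or disjoint i64 %i, 1   ; no common bits: same result as add i64 %i, 1

SCEV currently derives this through the ValueTracking known-bits query;
once it uses the flag, the IR states the guarantee directly.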

Added: 
    

Modified: 
    llvm/test/Analysis/CostModel/X86/interleaved-load-half.ll
    llvm/test/Analysis/CostModel/X86/masked-interleaved-load-i16.ll
    llvm/test/Analysis/CostModel/X86/masked-interleaved-store-i16.ll
    llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll
    llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll
    llvm/test/Analysis/DependenceAnalysis/GCD.ll
    llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
    llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
    llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
    llvm/test/Analysis/ScalarEvolution/sext-mul.ll
    llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
    llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlad11.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smladx-1.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlald0.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlald2.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-1.ll
    llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-2.ll
    llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
    llvm/test/CodeGen/ARM/dsp-loop-indexing.ll
    llvm/test/CodeGen/ARM/fpclamptosat.ll
    llvm/test/CodeGen/ARM/loop-indexing.ll
    llvm/test/CodeGen/ARM/shifter_operand.ll
    llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
    llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
    llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
    llvm/test/CodeGen/SystemZ/vec-load-element.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
    llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
    llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
    llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
    llvm/test/CodeGen/WebAssembly/unrolled-mem-indices.ll
    llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
    llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
    llvm/test/CodeGen/X86/avx512vnni-combine.ll
    llvm/test/CodeGen/X86/avxvnni-combine.ll
    llvm/test/CodeGen/X86/loop-strength-reduce4.ll
    llvm/test/CodeGen/X86/lsr-addrecloops.ll
    llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
    llvm/test/CodeGen/X86/merge_store.ll
    llvm/test/CodeGen/X86/optimize-max-0.ll
    llvm/test/CodeGen/X86/unused_stackslots.ll
    llvm/test/Transforms/IRCE/stride_more_than_1.ll
    llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
    llvm/test/Transforms/IndVarSimplify/lcssa-preservation.ll
    llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
    llvm/test/Transforms/IndVarSimplify/pr58702-invalidate-scev-when-replacing-congruent-phis.ll
    llvm/test/Transforms/IndVarSimplify/pr64891.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
    llvm/test/Transforms/LoopIdiom/unroll-custom-dl.ll
    llvm/test/Transforms/LoopIdiom/unroll.ll
    llvm/test/Transforms/LoopInterchange/pr57148.ll
    llvm/test/Transforms/LoopReroll/basic32iters.ll
    llvm/test/Transforms/LoopReroll/indvar_with_ext.ll
    llvm/test/Transforms/LoopReroll/reduction.ll
    llvm/test/Transforms/LoopReroll/reroll_with_dbg.ll
    llvm/test/Transforms/LoopStrengthReduce/ARM/complexity.ll
    llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/missing-phi-operand-update.ll
    llvm/test/Transforms/LoopUnroll/X86/high-cost-expansion.ll
    llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
    llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll
    llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
    llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
    llvm/test/Transforms/LoopVectorize/X86/pr47437.ll
    llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
    llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
    llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
    llvm/test/Transforms/LoopVectorize/interleaved-accesses-masked-group.ll
    llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
    llvm/test/Transforms/LoopVectorize/pr39099.ll
    llvm/test/Transforms/LoopVectorize/reduction-with-invariant-store.ll
    llvm/test/Transforms/LoopVectorize/unroll_nonlatch.ll
    llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
    llvm/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
    llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
    llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
    llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
    llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
    llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
    llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
    llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
    llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
    llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
    llvm/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
    llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
    llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
    llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-half.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-half.ll
index a0cbd740e4736..457b00dea4bbe 100644
--- a/llvm/test/Analysis/CostModel/X86/interleaved-load-half.ll
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-half.ll
@@ -33,7 +33,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %1 = load half, ptr %arrayidx2, align 4
   %add3 = fadd fast half %1, %mul
   store half %add3, ptr %arrayidx2, align 4
-  %add4 = or i32 %i.073, 1
+  %add4 = or disjoint i32 %i.073, 1
   %arrayidx5 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add4
   %2 = load half, ptr %arrayidx5, align 4
   %mul6 = fmul fast half %2, %k
@@ -41,7 +41,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %3 = load half, ptr %arrayidx8, align 4
   %add9 = fadd fast half %3, %mul6
   store half %add9, ptr %arrayidx8, align 4
-  %add10 = or i32 %i.073, 2
+  %add10 = or disjoint i32 %i.073, 2
   %arrayidx11 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add10
   %4 = load half, ptr %arrayidx11, align 4
   %mul12 = fmul fast half %4, %k
@@ -49,7 +49,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %5 = load half, ptr %arrayidx14, align 4
   %add15 = fadd fast half %5, %mul12
   store half %add15, ptr %arrayidx14, align 4
-  %add16 = or i32 %i.073, 3
+  %add16 = or disjoint i32 %i.073, 3
   %arrayidx17 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add16
   %6 = load half, ptr %arrayidx17, align 4
   %mul18 = fmul fast half %6, %k
@@ -57,7 +57,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %7 = load half, ptr %arrayidx20, align 4
   %add21 = fadd fast half %7, %mul18
   store half %add21, ptr %arrayidx20, align 4
-  %add22 = or i32 %i.073, 4
+  %add22 = or disjoint i32 %i.073, 4
   %arrayidx23 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add22
   %8 = load half, ptr %arrayidx23, align 4
   %mul24 = fmul fast half %8, %k
@@ -65,7 +65,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %9 = load half, ptr %arrayidx26, align 4
   %add27 = fadd fast half %9, %mul24
   store half %add27, ptr %arrayidx26, align 4
-  %add28 = or i32 %i.073, 5
+  %add28 = or disjoint i32 %i.073, 5
   %arrayidx29 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add28
   %10 = load half, ptr %arrayidx29, align 4
   %mul30 = fmul fast half %10, %k
@@ -73,7 +73,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %11 = load half, ptr %arrayidx32, align 4
   %add33 = fadd fast half %11, %mul30
   store half %add33, ptr %arrayidx32, align 4
-  %add34 = or i32 %i.073, 6
+  %add34 = or disjoint i32 %i.073, 6
   %arrayidx35 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add34
   %12 = load half, ptr %arrayidx35, align 4
   %mul36 = fmul fast half %12, %k
@@ -81,7 +81,7 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %13 = load half, ptr %arrayidx38, align 4
   %add39 = fadd fast half %13, %mul36
   store half %add39, ptr %arrayidx38, align 4
-  %add40 = or i32 %i.073, 7
+  %add40 = or disjoint i32 %i.073, 7
   %arrayidx41 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add40
   %14 = load half, ptr %arrayidx41, align 4
   %mul42 = fmul fast half %14, %k

diff --git a/llvm/test/Analysis/CostModel/X86/masked-interleaved-load-i16.ll b/llvm/test/Analysis/CostModel/X86/masked-interleaved-load-i16.ll
index d42e43173d254..e867e5f5bcfc7 100644
--- a/llvm/test/Analysis/CostModel/X86/masked-interleaved-load-i16.ll
+++ b/llvm/test/Analysis/CostModel/X86/masked-interleaved-load-i16.ll
@@ -51,7 +51,7 @@ for.body:
   %i1 = shl nuw nsw i64 %indvars.iv, 2
   %arrayidx2 = getelementptr inbounds i16, ptr %points, i64 %i1
   %i2 = load i16, ptr %arrayidx2, align 2
-  %i3 = or i64 %i1, 1
+  %i3 = or disjoint i64 %i1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %i3
   %i4 = load i16, ptr %arrayidx7, align 2
   %arrayidx = getelementptr inbounds i16, ptr %x, i64 %indvars.iv
@@ -113,7 +113,7 @@ for.body:
   %i1 = shl nuw nsw i64 %indvars.iv, 2
   %arrayidx2 = getelementptr inbounds i16, ptr %points, i64 %i1
   %i2 = load i16, ptr %arrayidx2, align 2
-  %i3 = or i64 %i1, 1
+  %i3 = or disjoint i64 %i1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %i3
   %i4 = load i16, ptr %arrayidx7, align 2
   %arrayidx = getelementptr inbounds i16, ptr %x, i64 %indvars.iv

diff --git a/llvm/test/Analysis/CostModel/X86/masked-interleaved-store-i16.ll b/llvm/test/Analysis/CostModel/X86/masked-interleaved-store-i16.ll
index 171b8d032e9c0..741dd0746b744 100644
--- a/llvm/test/Analysis/CostModel/X86/masked-interleaved-store-i16.ll
+++ b/llvm/test/Analysis/CostModel/X86/masked-interleaved-store-i16.ll
@@ -55,7 +55,7 @@ for.body:
   store i16 %0, ptr %arrayidx2, align 2
   %arrayidx4 = getelementptr inbounds i16, ptr %y, i64 %indvars.iv
   %2 = load i16, ptr %arrayidx4, align 2
-  %3 = or i64 %1, 1
+  %3 = or disjoint i64 %1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %3
   store i16 %2, ptr %arrayidx7, align 2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -117,7 +117,7 @@ for.body:
   store i16 %0, ptr %arrayidx2, align 2
   %arrayidx4 = getelementptr inbounds i16, ptr %y, i64 %indvars.iv
   %2 = load i16, ptr %arrayidx4, align 2
-  %3 = or i64 %1, 1
+  %3 = or disjoint i64 %1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %3
   store i16 %2, ptr %arrayidx7, align 2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

diff --git a/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll b/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll
index 1c7bcda1c7b15..4aa2458dcd8cd 100644
--- a/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/ExactRDIV.ll
@@ -40,7 +40,7 @@ for.body4:                                        ; preds = %for.body4.preheader
   %j.02 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.body4.preheader ]
   %B.addr.01 = phi ptr [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
   %mul5 = shl nsw i64 %j.02, 1
-  %add64 = or i64 %mul5, 1
+  %add64 = or disjoint i64 %mul5, 1
   %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 %add64
   %0 = load i32, ptr %arrayidx7, align 4
   %incdec.ptr = getelementptr inbounds i32, ptr %B.addr.01, i64 1

diff --git a/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll b/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll
index 9a76154ce4e5c..7822e61cf3aa0 100644
--- a/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/ExactSIV.ll
@@ -29,7 +29,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %add
   store i32 %conv, ptr %arrayidx, align 4
   %mul = shl i64 %i.02, 1
-  %add13 = or i64 %mul, 1
+  %add13 = or disjoint i64 %mul, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %add13
   %0 = load i32, ptr %arrayidx2, align 4
   %incdec.ptr = getelementptr inbounds i32, ptr %B.addr.01, i64 1
@@ -68,7 +68,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %add
   store i32 %conv, ptr %arrayidx, align 4
   %mul1 = shl i64 %i.02, 1
-  %add23 = or i64 %mul1, 1
+  %add23 = or disjoint i64 %mul1, 1
   %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %add23
   %0 = load i32, ptr %arrayidx3, align 4
   %incdec.ptr = getelementptr inbounds i32, ptr %B.addr.01, i64 1

diff --git a/llvm/test/Analysis/DependenceAnalysis/GCD.ll b/llvm/test/Analysis/DependenceAnalysis/GCD.ll
index 898158d983b26..f4890a269606b 100644
--- a/llvm/test/Analysis/DependenceAnalysis/GCD.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/GCD.ll
@@ -93,7 +93,7 @@ for.body3:                                        ; preds = %for.cond1.preheader
   %mul5 = mul nsw i64 %i.03, 6
   %mul6 = shl nsw i64 %j.02, 3
   %add = add nsw i64 %mul5, %mul6
-  %add7 = or i64 %add, 1
+  %add7 = or disjoint i64 %add, 1
   %arrayidx8 = getelementptr inbounds i32, ptr %A, i64 %add7
   %0 = load i32, ptr %arrayidx8, align 4
   %incdec.ptr = getelementptr inbounds i32, ptr %B.addr.11, i64 1
@@ -142,7 +142,7 @@ for.body3:                                        ; preds = %for.cond1.preheader
   %mul = shl nsw i64 %i.03, 1
   %mul4 = shl nsw i64 %j.02, 2
   %sub = sub nsw i64 %mul, %mul4
-  %add5 = or i64 %sub, 1
+  %add5 = or disjoint i64 %sub, 1
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %add5
   store i32 %conv, ptr %arrayidx, align 4
   %mul5 = mul nsw i64 %i.03, 6
@@ -384,7 +384,7 @@ for.body3:                                        ; preds = %for.body3.preheader
   %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %arrayidx.sum
   store i32 %conv, ptr %arrayidx5, align 4
   %mul6 = mul nsw i64 %j.03, 6
-  %add7 = or i64 %mul6, 1
+  %add7 = or disjoint i64 %mul6, 1
   %mul7 = shl nsw i64 %i.06, 3
   %1 = mul nsw i64 %mul7, %n
   %arrayidx8.sum = add i64 %1, %add7
@@ -464,7 +464,7 @@ for.body3:                                        ; preds = %for.body3.preheader
   store i32 %7, ptr %arrayidx6, align 4
   %8 = trunc i64 %indvars.iv to i32
   %mul7 = mul nsw i32 %8, 6
-  %add7 = or i32 %mul7, 1
+  %add7 = or disjoint i32 %mul7, 1
   %idxprom8 = sext i32 %add7 to i64
   %9 = trunc i64 %indvars.iv8 to i32
   %mul9 = shl nsw i32 %9, 3
@@ -548,7 +548,7 @@ for.body3:                                        ; preds = %for.body3.preheader
   %4 = trunc i64 %indvars.iv to i32
   %mul8 = mul nsw i32 %4, 6
   %add9 = add nsw i32 %mul7, %mul8
-  %add10 = or i32 %add9, 1
+  %add10 = or disjoint i32 %add9, 1
   %idxprom11 = sext i32 %add10 to i64
   %arrayidx12 = getelementptr inbounds i32, ptr %A, i64 %idxprom11
   %5 = load i32, ptr %arrayidx12, align 4
@@ -627,7 +627,7 @@ for.body3:                                        ; preds = %for.body3.preheader
   store i32 %7, ptr %arrayidx6, align 4
   %8 = trunc i64 %indvars.iv to i32
   %mul7 = mul i32 %8, 6
-  %add7 = or i32 %mul7, 1
+  %add7 = or disjoint i32 %mul7, 1
   %idxprom8 = zext i32 %add7 to i64
   %9 = trunc i64 %indvars.iv8 to i32
   %mul9 = shl i32 %9, 3

diff --git a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
index 145117a712ac5..bfdd15f170d06 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll
@@ -65,7 +65,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
   %0 = trunc i64 %indvars.iv to i32
   store i32 %0, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 3
+  %1 = or disjoint i64 %indvars.iv, 3
   %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %1
   %2 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %2, %sum.013
@@ -99,7 +99,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
   %0 = trunc i64 %indvars.iv to i32
   store i32 %0, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %1
   %2 = trunc i64 %1 to i32
   store i32 %2, ptr %arrayidx3, align 4
@@ -213,7 +213,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
   %0 = trunc i64 %indvars.iv to i32
   store i32 %0, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
   %2 = trunc i64 %1 to i32
@@ -328,7 +328,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
   %0 = trunc i64 %indvars.iv to i32
   store i32 %0, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %add.ptr, i64 %indvars.iv
   %2 = trunc i64 %1 to i32
   store i32 %2, ptr %arrayidx2, align 4
@@ -525,7 +525,7 @@ for.body:                                         ; preds = %entry, %for.body
   %0 = trunc i64 %indvars.iv to i32
   %arrayidx2 = getelementptr inbounds i32, ptr %incdec.ptr, i64 %indvars.iv
   store i32 %0, ptr %arrayidx2, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %1
   %2 = trunc i64 %1 to i32
   store i32 %2, ptr %arrayidx5, align 4

diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
index bc308f258dd1d..60809fcf31de6 100644
--- a/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
@@ -25,7 +25,7 @@ define void @foo(i32 %no, ptr nocapture %d, ptr nocapture %q) nounwind {
 ; CHECK-NEXT:    --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
 ; CHECK-NEXT:    %5 = getelementptr inbounds double, ptr %q, i64 %4
 ; CHECK-NEXT:    --> {%q,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
-; CHECK-NEXT:    %7 = or i32 %i.01, 1
+; CHECK-NEXT:    %7 = or disjoint i32 %i.01, 1
 ; CHECK-NEXT:    --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
 ; CHECK-NEXT:    %8 = sext i32 %7 to i64
 ; CHECK-NEXT:    --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
@@ -71,7 +71,7 @@ bb:                                               ; preds = %bb.nph, %bb1
   %4 = sext i32 %i.01 to i64                      ; <i64> [#uses=1]
   %5 = getelementptr inbounds double, ptr %q, i64 %4  ; <ptr> [#uses=1]
   %6 = load double, ptr %5, align 8                   ; <double> [#uses=1]
-  %7 = or i32 %i.01, 1                            ; <i32> [#uses=1]
+  %7 = or disjoint i32 %i.01, 1                            ; <i32> [#uses=1]
 
   %8 = sext i32 %7 to i64                         ; <i64> [#uses=1]
 

diff --git a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
index 51b8db6768048..fab408ea372e6 100644
--- a/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
+++ b/llvm/test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -22,7 +22,7 @@ define void @foo(i32 %no, ptr nocapture %d, ptr nocapture %q) nounwind {
 ; CHECK-NEXT:    --> {0,+,2}<nuw><nsw><%bb> U: [0,2147483645) S: [0,2147483645) Exits: (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> LoopDispositions: { %bb: Computable }
 ; CHECK-NEXT:    %5 = getelementptr inbounds double, ptr %q, i64 %4
 ; CHECK-NEXT:    --> {%q,+,16}<nuw><%bb> U: full-set S: full-set Exits: ((16 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw> + %q) LoopDispositions: { %bb: Computable }
-; CHECK-NEXT:    %7 = or i32 %i.01, 1
+; CHECK-NEXT:    %7 = or disjoint i32 %i.01, 1
 ; CHECK-NEXT:    --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((-1 + (2 * (%no /u 2))<nuw>) /u 2))<nuw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
 ; CHECK-NEXT:    %8 = sext i32 %7 to i64
 ; CHECK-NEXT:    --> {1,+,2}<nuw><nsw><%bb> U: [1,2147483646) S: [1,2147483646) Exits: (1 + (2 * ((1 + (zext i32 (-2 + (2 * (%no /u 2))<nuw>) to i64))<nuw><nsw> /u 2))<nuw><nsw>)<nuw><nsw> LoopDispositions: { %bb: Computable }
@@ -67,7 +67,7 @@ bb:                                               ; preds = %bb.nph, %bb1
   %4 = sext i32 %i.01 to i64                      ; <i64> [#uses=1]
   %5 = getelementptr inbounds double, ptr %q, i64 %4  ; <ptr> [#uses=1]
   %6 = load double, ptr %5, align 8                   ; <double> [#uses=1]
-  %7 = or i32 %i.01, 1                            ; <i32> [#uses=1]
+  %7 = or disjoint i32 %i.01, 1                            ; <i32> [#uses=1]
 
   %8 = sext i32 %7 to i64                         ; <i64> [#uses=1]
 

diff --git a/llvm/test/Analysis/ScalarEvolution/sext-mul.ll b/llvm/test/Analysis/ScalarEvolution/sext-mul.ll
index a31789c5ee3f7..2340d5cb6fb66 100644
--- a/llvm/test/Analysis/ScalarEvolution/sext-mul.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sext-mul.ll
@@ -18,7 +18,7 @@ define void @foo(ptr nocapture %arg, i32 %arg1, i32 %arg2) {
 ; CHECK-NEXT:    --> %tmp12 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb7: Variant }
 ; CHECK-NEXT:    %tmp13 = sub nsw i32 %tmp12, %arg1
 ; CHECK-NEXT:    --> ((-1 * %arg1) + %tmp12) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb7: Variant }
-; CHECK-NEXT:    %tmp14 = or i64 %tmp10, 1
+; CHECK-NEXT:    %tmp14 = or disjoint i64 %tmp10, 1
 ; CHECK-NEXT:    --> (1 + (sext i32 {0,+,2}<%bb7> to i64))<nuw><nsw> U: [1,0) S: [-2147483647,2147483648) Exits: (1 + (sext i32 (-2 + (2 * %arg2)) to i64))<nuw><nsw> LoopDispositions: { %bb7: Computable }
 ; CHECK-NEXT:    %tmp15 = getelementptr inbounds i32, ptr %arg, i64 %tmp14
 ; CHECK-NEXT:    --> (4 + (4 * (sext i32 {0,+,2}<%bb7> to i64))<nsw> + %arg) U: full-set S: full-set Exits: (4 + (4 * (sext i32 (-2 + (2 * %arg2)) to i64))<nsw> + %arg) LoopDispositions: { %bb7: Computable }
@@ -58,7 +58,7 @@ bb7:                                              ; preds = %bb7, %bb3
   %tmp12 = load i32, ptr %tmp11, align 4
   %tmp13 = sub nsw i32 %tmp12, %arg1
   store i32 %tmp13, ptr %tmp11, align 4
-  %tmp14 = or i64 %tmp10, 1
+  %tmp14 = or disjoint i64 %tmp10, 1
   %tmp15 = getelementptr inbounds i32, ptr %arg, i64 %tmp14
   %tmp16 = load i32, ptr %tmp15, align 4
   %tmp17 = mul nsw i32 %tmp16, %arg1
@@ -85,7 +85,7 @@ define void @goo(ptr nocapture %arg3, i32 %arg4, i32 %arg5) {
 ; CHECK-NEXT:    --> %t12 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb7: Variant }
 ; CHECK-NEXT:    %t13 = sub nsw i32 %t12, %arg4
 ; CHECK-NEXT:    --> ((-1 * %arg4) + %t12) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb7: Variant }
-; CHECK-NEXT:    %t14 = or i128 %t10, 1
+; CHECK-NEXT:    %t14 = or disjoint i128 %t10, 1
 ; CHECK-NEXT:    --> (1 + (sext i127 {0,+,633825300114114700748351602688}<%bb7> to i128))<nuw><nsw> U: [1,-633825300114114700748351602686) S: [-85070591730234615865843651857942052863,85070591096409315751728951109590450178) Exits: (1 + (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128))<nuw><nsw> LoopDispositions: { %bb7: Computable }
 ; CHECK-NEXT:    %t15 = getelementptr inbounds i32, ptr %arg3, i128 %t14
 ; CHECK-NEXT:    --> (4 + %arg3)<nuw> U: [4,0) S: [4,0) Exits: (4 + %arg3)<nuw> LoopDispositions: { %bb7: Invariant }
@@ -125,7 +125,7 @@ bb7:                                              ; preds = %bb7, %bb3
   %t12 = load i32, ptr %t11, align 4
   %t13 = sub nsw i32 %t12, %arg4
   store i32 %t13, ptr %t11, align 4
-  %t14 = or i128 %t10, 1
+  %t14 = or disjoint i128 %t10, 1
   %t15 = getelementptr inbounds i32, ptr %arg3, i128 %t14
   %t16 = load i32, ptr %t15, align 4
   %t17 = mul nsw i32 %t16, %arg4

diff --git a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
index 7bf444344f286..65ffe3cd86038 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
@@ -56,17 +56,17 @@ vector.body:                                      ; preds = %vector.body, %entry
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array0, align 8
   %array2 = getelementptr inbounds double, ptr %array0, i64 2
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array2, align 8
-  %index4 = or i64 %index, 4
+  %index4 = or disjoint i64 %index, 4
   %array4 = getelementptr inbounds double, ptr %array, i64 %index4
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array4, align 8
   %array6 = getelementptr inbounds double, ptr %array4, i64 2
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array6, align 8
-  %index8 = or i64 %index, 8
+  %index8 = or disjoint i64 %index, 8
   %array8 = getelementptr inbounds double, ptr %array, i64 %index8
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array8, align 8
   %array10 = getelementptr inbounds double, ptr %array8, i64 2
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array10, align 8
-  %index12 = or i64 %index, 12
+  %index12 = or disjoint i64 %index, 12
   %array12 = getelementptr inbounds double, ptr %array, i64 %index12
   store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array12, align 8
   %array14 = getelementptr inbounds double, ptr %array12, i64 2

diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 3d12e44d46d20..85cd00cbfc536 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -961,61 +961,61 @@ for.body:                                         ; preds = %for.body, %for.cond
   %load1 = load i64, ptr addrspace(1) %add.ptr8, align 8
   %add = add i64 %load1, %sum.128
 
-  %add9 = or i32 %block.029, 256
+  %add9 = or disjoint i32 %block.029, 256
   %conv3.1 = zext i32 %add9 to i64
   %add.ptr8.1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.1
   %load2 = load i64, ptr addrspace(1) %add.ptr8.1, align 8
   %add.1 = add i64 %load2, %add
 
-  %add9.1 = or i32 %block.029, 512
+  %add9.1 = or disjoint i32 %block.029, 512
   %conv3.2 = zext i32 %add9.1 to i64
   %add.ptr8.2 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.2
   %l3 = load i64, ptr addrspace(1) %add.ptr8.2, align 8
   %add.2 = add i64 %l3, %add.1
 
-  %add9.2 = or i32 %block.029, 768
+  %add9.2 = or disjoint i32 %block.029, 768
   %conv3.3 = zext i32 %add9.2 to i64
   %add.ptr8.3 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.3
   %l4 = load i64, ptr addrspace(1) %add.ptr8.3, align 8
   %add.3 = add i64 %l4, %add.2
 
-  %add9.3 = or i32 %block.029, 1024
+  %add9.3 = or disjoint i32 %block.029, 1024
   %conv3.4 = zext i32 %add9.3 to i64
   %add.ptr8.4 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.4
   %l5 = load i64, ptr addrspace(1) %add.ptr8.4, align 8
   %add.4 = add i64 %l5, %add.3
 
-  %add9.4 = or i32 %block.029, 1280
+  %add9.4 = or disjoint i32 %block.029, 1280
   %conv3.5 = zext i32 %add9.4 to i64
   %add.ptr8.5 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.5
   %l6 = load i64, ptr addrspace(1) %add.ptr8.5, align 8
   %add.5 = add i64 %l6, %add.4
 
-  %add9.5 = or i32 %block.029, 1536
+  %add9.5 = or disjoint i32 %block.029, 1536
   %conv3.6 = zext i32 %add9.5 to i64
   %add.ptr8.6 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.6
   %load7 = load i64, ptr addrspace(1) %add.ptr8.6, align 8
   %add.6 = add i64 %load7, %add.5
 
-  %add9.6 = or i32 %block.029, 1792
+  %add9.6 = or disjoint i32 %block.029, 1792
   %conv3.7 = zext i32 %add9.6 to i64
   %add.ptr8.7 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.7
   %load8 = load i64, ptr addrspace(1) %add.ptr8.7, align 8
   %add.7 = add i64 %load8, %add.6
 
-  %add9.7 = or i32 %block.029, 2048
+  %add9.7 = or disjoint i32 %block.029, 2048
   %conv3.8 = zext i32 %add9.7 to i64
   %add.ptr8.8 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.8
   %load9 = load i64, ptr addrspace(1) %add.ptr8.8, align 8
   %add.8 = add i64 %load9, %add.7
 
-  %add9.8 = or i32 %block.029, 2304
+  %add9.8 = or disjoint i32 %block.029, 2304
   %conv3.9 = zext i32 %add9.8 to i64
   %add.ptr8.9 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.9
   %load10 = load i64, ptr addrspace(1) %add.ptr8.9, align 8
   %add.9 = add i64 %load10, %add.8
 
-  %add9.9 = or i32 %block.029, 2560
+  %add9.9 = or disjoint i32 %block.029, 2560
   %conv3.10 = zext i32 %add9.9 to i64
   %add.ptr8.10 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.10
   %load11 = load i64, ptr addrspace(1) %add.ptr8.10, align 8

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
index d9af38782fe8b..0d5e8d61e16b4 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
@@ -91,14 +91,14 @@ for.body:
   %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
   %arrayidx = getelementptr inbounds i16, ptr %arg3, i32 %i.056
   %0 = load i16, ptr %arrayidx, align 2
-  %add1 = or i32 %i.056, 1
+  %add1 = or disjoint i32 %i.056, 1
   %arrayidx2 = getelementptr inbounds i16, ptr %arg3, i32 %add1
   %1 = load i16, ptr %arrayidx2, align 2
-  %add3 = or i32 %i.056, 2
+  %add3 = or disjoint i32 %i.056, 2
   %arrayidx4 = getelementptr inbounds i16, ptr %arg3, i32 %add3
   %2 = load i16, ptr %arrayidx4, align 2
 
-  %add5 = or i32 %i.056, 3
+  %add5 = or disjoint i32 %i.056, 3
   %arrayidx6 = getelementptr inbounds i16, ptr %arg3, i32 %add5
   %3 = load i16, ptr %arrayidx6, align 2
   %arrayidx8 = getelementptr inbounds i16, ptr %arg2, i32 %i.056

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlad11.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlad11.ll
index 061603db79be0..bb3b2917e8d39 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlad11.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlad11.ll
@@ -33,13 +33,13 @@ for.body:
   %i.053 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
   %arrayidx = getelementptr inbounds i16, ptr %arg3, i32 %i.053
   %0 = load i16, ptr %arrayidx, align 2
-  %add1 = or i32 %i.053, 1
+  %add1 = or disjoint i32 %i.053, 1
   %arrayidx2 = getelementptr inbounds i16, ptr %arg3, i32 %add1
   %1 = load i16, ptr %arrayidx2, align 2
-  %add3 = or i32 %i.053, 2
+  %add3 = or disjoint i32 %i.053, 2
   %arrayidx4 = getelementptr inbounds i16, ptr %arg3, i32 %add3
   %2 = load i16, ptr %arrayidx4, align 2
-  %add5 = or i32 %i.053, 3
+  %add5 = or disjoint i32 %i.053, 3
   %arrayidx6 = getelementptr inbounds i16, ptr %arg3, i32 %add5
   %3 = load i16, ptr %arrayidx6, align 2
   %arrayidx8 = getelementptr inbounds i16, ptr %arg2, i32 %i.053

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smladx-1.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smladx-1.ll
index c7705468dc912..9001671ba7f14 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smladx-1.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smladx-1.ll
@@ -69,17 +69,17 @@ for.body:
   %In2 = load i16, ptr %pIn2Base, align 2
   %pIn1.0 = getelementptr inbounds i16, ptr %pIn1, i32 %i.011
   %In1 = load i16, ptr %pIn1.0, align 2
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %pIn2.1 = getelementptr inbounds i16, ptr %pIn2Base, i32 -1
   %In2.1 = load i16, ptr %pIn2.1, align 2
   %pIn1.1 = getelementptr inbounds i16, ptr %pIn1, i32 %inc
   %In1.1 = load i16, ptr %pIn1.1, align 2
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %pIn2.2 = getelementptr inbounds i16, ptr %pIn2Base, i32 -2
   %In2.2 = load i16, ptr %pIn2.2, align 2
   %pIn1.2 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.1
   %In1.2 = load i16, ptr %pIn1.2, align 2
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %pIn2.3 = getelementptr inbounds i16, ptr %pIn2Base, i32 -3
   %In2.3 = load i16, ptr %pIn2.3, align 2
   %pIn1.3 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.2

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlald0.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlald0.ll
index 843776f4edd45..d4f64297c848e 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlald0.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlald0.ll
@@ -91,14 +91,14 @@ for.body:
   %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
   %arrayidx = getelementptr inbounds i16, ptr %arg3, i32 %i.056
   %0 = load i16, ptr %arrayidx, align 2
-  %add1 = or i32 %i.056, 1
+  %add1 = or disjoint i32 %i.056, 1
   %arrayidx2 = getelementptr inbounds i16, ptr %arg3, i32 %add1
   %1 = load i16, ptr %arrayidx2, align 2
-  %add3 = or i32 %i.056, 2
+  %add3 = or disjoint i32 %i.056, 2
   %arrayidx4 = getelementptr inbounds i16, ptr %arg3, i32 %add3
   %2 = load i16, ptr %arrayidx4, align 2
 
-  %add5 = or i32 %i.056, 3
+  %add5 = or disjoint i32 %i.056, 3
   %arrayidx6 = getelementptr inbounds i16, ptr %arg3, i32 %add5
   %3 = load i16, ptr %arrayidx6, align 2
   %arrayidx8 = getelementptr inbounds i16, ptr %arg2, i32 %i.056

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlald2.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlald2.ll
index 9d721ac22c843..df23934667352 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlald2.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlald2.ll
@@ -92,14 +92,14 @@ for.body:
   %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
   %arrayidx = getelementptr inbounds i16, ptr %arg3, i32 %i.056
   %0 = load i16, ptr %arrayidx, align 2
-  %add1 = or i32 %i.056, 1
+  %add1 = or disjoint i32 %i.056, 1
   %arrayidx2 = getelementptr inbounds i16, ptr %arg3, i32 %add1
   %1 = load i16, ptr %arrayidx2, align 2
-  %add3 = or i32 %i.056, 2
+  %add3 = or disjoint i32 %i.056, 2
   %arrayidx4 = getelementptr inbounds i16, ptr %arg3, i32 %add3
   %2 = load i16, ptr %arrayidx4, align 2
 
-  %add5 = or i32 %i.056, 3
+  %add5 = or disjoint i32 %i.056, 3
   %arrayidx6 = getelementptr inbounds i16, ptr %arg3, i32 %add5
   %3 = load i16, ptr %arrayidx6, align 2
   %arrayidx8 = getelementptr inbounds i16, ptr %arg2, i32 %i.056

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-1.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-1.ll
index d6a18bceab592..88d6abdd83483 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-1.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-1.ll
@@ -68,17 +68,17 @@ for.body:
   %In2 = load i16, ptr %pIn2Base, align 2
   %pIn1.0 = getelementptr inbounds i16, ptr %pIn1, i32 %i.011
   %In1 = load i16, ptr %pIn1.0, align 2
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %pIn2.1 = getelementptr inbounds i16, ptr %pIn2Base, i32 -1
   %In2.1 = load i16, ptr %pIn2.1, align 2
   %pIn1.1 = getelementptr inbounds i16, ptr %pIn1, i32 %inc
   %In1.1 = load i16, ptr %pIn1.1, align 2
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %pIn2.2 = getelementptr inbounds i16, ptr %pIn2Base, i32 -2
   %In2.2 = load i16, ptr %pIn2.2, align 2
   %pIn1.2 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.1
   %In1.2 = load i16, ptr %pIn1.2, align 2
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %pIn2.3 = getelementptr inbounds i16, ptr %pIn2Base, i32 -3
   %In2.3 = load i16, ptr %pIn2.3, align 2
   %pIn1.3 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.2

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-2.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-2.ll
index e47cf75bc8d09..75c202629f012 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-2.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlaldx-2.ll
@@ -68,17 +68,17 @@ for.body:
   %In2 = load i16, ptr %pIn2Base, align 2
   %pIn1.0 = getelementptr inbounds i16, ptr %pIn1, i32 %i.011
   %In1 = load i16, ptr %pIn1.0, align 2
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %pIn2.1 = getelementptr inbounds i16, ptr %pIn2Base, i32 -1
   %In2.1 = load i16, ptr %pIn2.1, align 2
   %pIn1.1 = getelementptr inbounds i16, ptr %pIn1, i32 %inc
   %In1.1 = load i16, ptr %pIn1.1, align 2
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %pIn2.2 = getelementptr inbounds i16, ptr %pIn2Base, i32 -2
   %In2.2 = load i16, ptr %pIn2.2, align 2
   %pIn1.2 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.1
   %In1.2 = load i16, ptr %pIn1.2, align 2
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %pIn2.3 = getelementptr inbounds i16, ptr %pIn2Base, i32 -3
   %In2.3 = load i16, ptr %pIn2.3, align 2
   %pIn1.3 = getelementptr inbounds i16, ptr %pIn1, i32 %inc.2

diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll b/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
index 3890edeaa353d..51d113376a375 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
@@ -16,15 +16,15 @@ entry:
   %arrayidx.us.i117.i = getelementptr inbounds i32, ptr %res, i32 %idx
   store i32 0, ptr %arrayidx.us.i117.i, align 4
   %mul.us.i118.i = mul i32 %idx, %N
-  %inc11.us.i.i = or i32 %idx, 1
+  %inc11.us.i.i = or disjoint i32 %idx, 1
   %arrayidx.us.i117.1.i = getelementptr inbounds i32, ptr %res, i32 %inc11.us.i.i
   store i32 0, ptr %arrayidx.us.i117.1.i, align 4
   %mul.us.i118.1.i = mul i32 %inc11.us.i.i, %N
-  %inc11.us.i.1.i = or i32 %idx, 2
+  %inc11.us.i.1.i = or disjoint i32 %idx, 2
   %arrayidx.us.i117.2.i = getelementptr inbounds i32, ptr %res, i32 %inc11.us.i.1.i
   store i32 0, ptr %arrayidx.us.i117.2.i, align 4
   %mul.us.i118.2.i = mul i32 %inc11.us.i.1.i, %N
-  %inc11.us.i.2.i = or i32 %idx, 3
+  %inc11.us.i.2.i = or disjoint i32 %idx, 3
   %arrayidx.us.i117.3.i = getelementptr inbounds i32, ptr %res, i32 %inc11.us.i.2.i
   store i32 0, ptr %arrayidx.us.i117.3.i, align 4
   %mul.us.i118.3.i = mul i32 %inc11.us.i.2.i, %N
@@ -63,7 +63,7 @@ for.body:
   %conv6.us.i.i = sext i16 %A8 to i32
   %mul7.us.i.i = mul nsw i32 %conv6.us.i.i, %conv.us.i.i
   %add9.us.i.i = add nsw i32 %mul7.us.i.i, %A3
-  %inc.us.i.i = or i32 %j.026.us.i.i, 1
+  %inc.us.i.i = or disjoint i32 %j.026.us.i.i, 1
   %add.us.i.1.i = add i32 %j.026.us.i.i, %mul.us.i118.1.i
   %arrayidx4.us.i.1.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.1.i
   %A9 = load i16, ptr %arrayidx4.us.i.1.i, align 2
@@ -73,7 +73,7 @@ for.body:
   %conv6.us.i.1.i = sext i16 %B0 to i32
   %mul7.us.i.1.i = mul nsw i32 %conv6.us.i.1.i, %conv.us.i.1.i
   %add9.us.i.1.i = add nsw i32 %mul7.us.i.1.i, %A4
-  %inc.us.i.1.i = or i32 %j.026.us.i.i, 1
+  %inc.us.i.1.i = or disjoint i32 %j.026.us.i.i, 1
   %add.us.i.2.i = add i32 %j.026.us.i.i, %mul.us.i118.2.i
   %arrayidx4.us.i.2.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.2.i
   %B1 = load i16, ptr %arrayidx4.us.i.2.i, align 2
@@ -83,7 +83,7 @@ for.body:
   %conv6.us.i.2.i = sext i16 %B2 to i32
   %mul7.us.i.2.i = mul nsw i32 %conv6.us.i.2.i, %conv.us.i.2.i
   %add9.us.i.2.i = add nsw i32 %mul7.us.i.2.i, %A5
-  %inc.us.i.2.i = or i32 %j.026.us.i.i, 1
+  %inc.us.i.2.i = or disjoint i32 %j.026.us.i.i, 1
   %add.us.i.3.i = add i32 %j.026.us.i.i, %mul.us.i118.3.i
   %arrayidx4.us.i.3.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.3.i
   %B3 = load i16, ptr %arrayidx4.us.i.3.i, align 2
@@ -93,7 +93,7 @@ for.body:
   %conv6.us.i.3.i = sext i16 %B4 to i32
   %mul7.us.i.3.i = mul nsw i32 %conv6.us.i.3.i, %conv.us.i.3.i
   %add9.us.i.3.i = add nsw i32 %mul7.us.i.3.i, %A6
-  %inc.us.i.3.i = or i32 %j.026.us.i.i, 1
+  %inc.us.i.3.i = or disjoint i32 %j.026.us.i.i, 1
   %add.us.i.1337.i = add i32 %inc.us.i.i, %mul.us.i118.i
   %arrayidx4.us.i.1338.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.1337.i
   %B5 = load i16, ptr %arrayidx4.us.i.1338.i, align 2
@@ -103,7 +103,7 @@ for.body:
   %conv6.us.i.1341.i = sext i16 %B6 to i32
   %mul7.us.i.1342.i = mul nsw i32 %conv6.us.i.1341.i, %conv.us.i.1339.i
   %add9.us.i.1343.i = add nsw i32 %mul7.us.i.1342.i, %add9.us.i.i
-  %inc.us.i.1344.i = or i32 %j.026.us.i.i, 2
+  %inc.us.i.1344.i = or disjoint i32 %j.026.us.i.i, 2
   %add.us.i.1.1.i = add i32 %inc.us.i.1.i, %mul.us.i118.1.i
   %arrayidx4.us.i.1.1.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.1.1.i
   %B7 = load i16, ptr %arrayidx4.us.i.1.1.i, align 2
@@ -113,7 +113,7 @@ for.body:
   %conv6.us.i.1.1.i = sext i16 %B6.dup to i32
   %mul7.us.i.1.1.i = mul nsw i32 %conv6.us.i.1.1.i, %conv.us.i.1.1.i
   %add9.us.i.1.1.i = add nsw i32 %mul7.us.i.1.1.i, %add9.us.i.1.i
-  %inc.us.i.1.1.i = or i32 %j.026.us.i.i, 2
+  %inc.us.i.1.1.i = or disjoint i32 %j.026.us.i.i, 2
   %add.us.i.2.1.i = add i32 %inc.us.i.2.i, %mul.us.i118.2.i
   %arrayidx4.us.i.2.1.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.2.1.i
   %B9 = load i16, ptr %arrayidx4.us.i.2.1.i, align 2
@@ -123,7 +123,7 @@ for.body:
   %conv6.us.i.2.1.i = sext i16 %B6.dup.i to i32
   %mul7.us.i.2.1.i = mul nsw i32 %conv6.us.i.2.1.i, %conv.us.i.2.1.i
   %add9.us.i.2.1.i = add nsw i32 %mul7.us.i.2.1.i, %add9.us.i.2.i
-  %inc.us.i.2.1.i = or i32 %j.026.us.i.i, 2
+  %inc.us.i.2.1.i = or disjoint i32 %j.026.us.i.i, 2
   %add.us.i.3.1.i = add i32 %inc.us.i.3.i, %mul.us.i118.3.i
   %arrayidx4.us.i.3.1.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.3.1.i
   %B11 = load i16, ptr %arrayidx4.us.i.3.1.i, align 2
@@ -133,7 +133,7 @@ for.body:
   %conv6.us.i.3.1.i = sext i16 %B6.dup.i.i to i32
   %mul7.us.i.3.1.i = mul nsw i32 %conv6.us.i.3.1.i, %conv.us.i.3.1.i
   %add9.us.i.3.1.i = add nsw i32 %mul7.us.i.3.1.i, %add9.us.i.3.i
-  %inc.us.i.3.1.i = or i32 %j.026.us.i.i, 2
+  %inc.us.i.3.1.i = or disjoint i32 %j.026.us.i.i, 2
   %add.us.i.2346.i = add i32 %inc.us.i.1344.i, %mul.us.i118.i
   %arrayidx4.us.i.2347.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.2346.i
   %B13 = load i16, ptr %arrayidx4.us.i.2347.i, align 2
@@ -143,7 +143,7 @@ for.body:
   %conv6.us.i.2350.i = sext i16 %B14 to i32
   %mul7.us.i.2351.i = mul nsw i32 %conv6.us.i.2350.i, %conv.us.i.2348.i
   %add9.us.i.2352.i = add nsw i32 %mul7.us.i.2351.i, %add9.us.i.1343.i
-  %inc.us.i.2353.i = or i32 %j.026.us.i.i, 3
+  %inc.us.i.2353.i = or disjoint i32 %j.026.us.i.i, 3
   %add.us.i.1.2.i = add i32 %inc.us.i.1.1.i, %mul.us.i118.1.i
   %arrayidx4.us.i.1.2.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.1.2.i
   %B15 = load i16, ptr %arrayidx4.us.i.1.2.i, align 2
@@ -153,7 +153,7 @@ for.body:
   %conv6.us.i.1.2.i = sext i16 %B14.dup to i32
   %mul7.us.i.1.2.i = mul nsw i32 %conv6.us.i.1.2.i, %conv.us.i.1.2.i
   %add9.us.i.1.2.i = add nsw i32 %mul7.us.i.1.2.i, %add9.us.i.1.1.i
-  %inc.us.i.1.2.i = or i32 %j.026.us.i.i, 3
+  %inc.us.i.1.2.i = or disjoint i32 %j.026.us.i.i, 3
   %add.us.i.2.2.i = add i32 %inc.us.i.2.1.i, %mul.us.i118.2.i
   %arrayidx4.us.i.2.2.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.2.2.i
   %B17 = load i16, ptr %arrayidx4.us.i.2.2.i, align 2
@@ -163,7 +163,7 @@ for.body:
   %conv6.us.i.2.2.i = sext i16 %B14.dup.i to i32
   %mul7.us.i.2.2.i = mul nsw i32 %conv6.us.i.2.2.i, %conv.us.i.2.2.i
   %add9.us.i.2.2.i = add nsw i32 %mul7.us.i.2.2.i, %add9.us.i.2.1.i
-  %inc.us.i.2.2.i = or i32 %j.026.us.i.i, 3
+  %inc.us.i.2.2.i = or disjoint i32 %j.026.us.i.i, 3
   %add.us.i.3.2.i = add i32 %inc.us.i.3.1.i, %mul.us.i118.3.i
   %arrayidx4.us.i.3.2.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.3.2.i
   %B19 = load i16, ptr %arrayidx4.us.i.3.2.i, align 2
@@ -173,7 +173,7 @@ for.body:
   %conv6.us.i.3.2.i = sext i16 %B14.dup.i.i to i32
   %mul7.us.i.3.2.i = mul nsw i32 %conv6.us.i.3.2.i, %conv.us.i.3.2.i
   %add9.us.i.3.2.i = add nsw i32 %mul7.us.i.3.2.i, %add9.us.i.3.1.i
-  %inc.us.i.3.2.i = or i32 %j.026.us.i.i, 3
+  %inc.us.i.3.2.i = or disjoint i32 %j.026.us.i.i, 3
   %add.us.i.3355.i = add i32 %inc.us.i.2353.i, %mul.us.i118.i
   %arrayidx4.us.i.3356.i = getelementptr inbounds i16, ptr %A, i32 %add.us.i.3355.i
   %B21 = load i16, ptr %arrayidx4.us.i.3356.i, align 2

diff --git a/llvm/test/CodeGen/ARM/dsp-loop-indexing.ll b/llvm/test/CodeGen/ARM/dsp-loop-indexing.ll
index c940158437fe4..9fb64471e9881 100644
--- a/llvm/test/CodeGen/ARM/dsp-loop-indexing.ll
+++ b/llvm/test/CodeGen/ARM/dsp-loop-indexing.ll
@@ -49,7 +49,7 @@ loop:
   %qadd.1 = call i32 @llvm.arm.qadd(i32 %a.1, i32 %b.1)
   %addr.1 = getelementptr inbounds i32, ptr %out.array, i32 %idx.1
   store i32 %qadd.1, ptr %addr.1
-  %idx.2 = or i32 %idx.1, 1
+  %idx.2 = or disjoint i32 %idx.1, 1
   %gep.a.2 = getelementptr inbounds i32, ptr %a.array, i32 %idx.2
   %a.2 = load i32, ptr %gep.a.2
   %gep.b.2 = getelementptr inbounds i32, ptr %b.array, i32 %idx.2
@@ -228,7 +228,7 @@ loop:
   %qadd.1 = call i32 @llvm.arm.qadd(i32 %a.1, i32 %b.1)
   %addr.1 = getelementptr inbounds i32, ptr %out.array, i32 %idx.1
   store i32 %qadd.1, ptr %addr.1
-  %idx.2 = or i32 %idx.1, 1
+  %idx.2 = or disjoint i32 %idx.1, 1
   %gep.a.2 = getelementptr inbounds i32, ptr %a.array, i32 %idx.2
   %a.2 = load i32, ptr %gep.a.2
   %gep.b.2 = getelementptr inbounds i32, ptr %b.array, i32 %idx.2
@@ -236,7 +236,7 @@ loop:
   %qadd.2 = call i32 @llvm.arm.qadd(i32 %a.2, i32 %b.2)
   %addr.2 = getelementptr inbounds i32, ptr %out.array, i32 %idx.2
   store i32 %qadd.2, ptr %addr.2
-  %idx.3 = or i32 %idx.1, 2
+  %idx.3 = or disjoint i32 %idx.1, 2
   %gep.a.3 = getelementptr inbounds i32, ptr %a.array, i32 %idx.3
   %a.3 = load i32, ptr %gep.a.3
   %gep.b.3 = getelementptr inbounds i32, ptr %b.array, i32 %idx.3
@@ -244,7 +244,7 @@ loop:
   %qadd.3 = call i32 @llvm.arm.qadd(i32 %a.3, i32 %b.3)
   %addr.3 = getelementptr inbounds i32, ptr %out.array, i32 %idx.3
   store i32 %qadd.3, ptr %addr.3
-  %idx.4 = or i32 %idx.1, 3
+  %idx.4 = or disjoint i32 %idx.1, 3
   %gep.a.4 = getelementptr inbounds i32, ptr %a.array, i32 %idx.4
   %a.4 = load i32, ptr %gep.a.4
   %gep.b.4 = getelementptr inbounds i32, ptr %b.array, i32 %idx.4

diff --git a/llvm/test/CodeGen/ARM/fpclamptosat.ll b/llvm/test/CodeGen/ARM/fpclamptosat.ll
index 6c3c74a47ebf1..8bd8aa7b34dec 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat.ll
@@ -4158,7 +4158,7 @@ define void @unroll_maxmin(ptr nocapture %0, ptr nocapture readonly %1, i32 %2)
   %15 = trunc i64 %14 to i32
   %16 = getelementptr inbounds i32, ptr %0, i32 %6
   store i32 %15, ptr %16, align 4
-  %17 = or i32 %6, 1
+  %17 = or disjoint i32 %6, 1
   %18 = getelementptr inbounds float, ptr %1, i32 %17
   %19 = load float, ptr %18, align 4
   %20 = fmul float %19, 0x41E0000000000000
@@ -4354,7 +4354,7 @@ define void @unroll_minmax(ptr nocapture %0, ptr nocapture readonly %1, i32 %2)
   %15 = trunc i64 %14 to i32
   %16 = getelementptr inbounds i32, ptr %0, i32 %6
   store i32 %15, ptr %16, align 4
-  %17 = or i32 %6, 1
+  %17 = or disjoint i32 %6, 1
   %18 = getelementptr inbounds float, ptr %1, i32 %17
   %19 = load float, ptr %18, align 4
   %20 = fmul float %19, 0x41E0000000000000

diff --git a/llvm/test/CodeGen/ARM/loop-indexing.ll b/llvm/test/CodeGen/ARM/loop-indexing.ll
index 110342c7f3ba7..bb859b202bbc0 100644
--- a/llvm/test/CodeGen/ARM/loop-indexing.ll
+++ b/llvm/test/CodeGen/ARM/loop-indexing.ll
@@ -51,7 +51,7 @@ loop:
   %b.1 = load float, ptr %gep.b.1
   %fmul.1 = fmul float %a.1, %b.1
   %fma.1 = fadd float %fmul.1, %res
-  %idx.2 = or i32 %idx.1, 1
+  %idx.2 = or disjoint i32 %idx.1, 1
   %gep.a.2 = getelementptr inbounds float, ptr %a, i32 %idx.2
   %a.2 = load float, ptr %gep.a.2
   %gep.b.2 = getelementptr inbounds float, ptr %b, i32 %idx.2
@@ -123,7 +123,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us = sext i16 %tmp10 to i32
   %mul.us.us = mul nsw i32 %conv17.us.us, %conv.us.us
   %add18.us.us = add nsw i32 %mul.us.us, %result_element.152.us.us
-  %inc.us.us = or i32 %filter_x.053.us.us, 1
+  %inc.us.us = or disjoint i32 %filter_x.053.us.us, 1
   %add13.us.us.1 = add i32 %inc.us.us, %res_x.060.us
   %arrayidx14.us.us.1 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us
   %tmp11 = load i16, ptr %arrayidx14.us.us.1, align 2
@@ -133,7 +133,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us.1 = sext i16 %tmp12 to i32
   %mul.us.us.1 = mul nsw i32 %conv17.us.us.1, %conv.us.us.1
   %add18.us.us.1 = add nsw i32 %mul.us.us.1, %add18.us.us
-  %inc.us.us.1 = or i32 %filter_x.053.us.us, 2
+  %inc.us.us.1 = or disjoint i32 %filter_x.053.us.us, 2
   %add13.us.us.2 = add i32 %inc.us.us.1, %res_x.060.us
   %arrayidx14.us.us.2 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us.1
   %tmp13 = load i16, ptr %arrayidx14.us.us.2, align 2
@@ -143,7 +143,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us.2 = sext i16 %tmp14 to i32
   %mul.us.us.2 = mul nsw i32 %conv17.us.us.2, %conv.us.us.2
   %add18.us.us.2 = add nsw i32 %mul.us.us.2, %add18.us.us.1
-  %inc.us.us.2 = or i32 %filter_x.053.us.us, 3
+  %inc.us.us.2 = or disjoint i32 %filter_x.053.us.us, 3
   %add13.us.us.3 = add i32 %inc.us.us.2, %res_x.060.us
   %arrayidx14.us.us.3 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us.2
   %tmp15 = load i16, ptr %arrayidx14.us.us.3, align 2
@@ -250,7 +250,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = mul nuw nsw i32 %conv2, %conv
   %arrayidx3 = getelementptr inbounds i32, ptr %C, i32 %i.010
   store i32 %mul, ptr %arrayidx3, align 4
-  %inc = or i32 %i.010, 1
+  %inc = or disjoint i32 %i.010, 1
   %arrayidx.1 = getelementptr inbounds i8, ptr %A, i32 %inc
   %tmp6 = load i8, ptr %arrayidx.1, align 1
   %conv.1 = zext i8 %tmp6 to i32
@@ -260,7 +260,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = mul nuw nsw i32 %conv2.1, %conv.1
   %arrayidx3.1 = getelementptr inbounds i32, ptr %C, i32 %inc
   store i32 %mul.1, ptr %arrayidx3.1, align 4
-  %inc.1 = or i32 %i.010, 2
+  %inc.1 = or disjoint i32 %i.010, 2
   %arrayidx.2 = getelementptr inbounds i8, ptr %A, i32 %inc.1
   %tmp8 = load i8, ptr %arrayidx.2, align 1
   %conv.2 = zext i8 %tmp8 to i32
@@ -270,7 +270,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = mul nuw nsw i32 %conv2.2, %conv.2
   %arrayidx3.2 = getelementptr inbounds i32, ptr %C, i32 %inc.1
   store i32 %mul.2, ptr %arrayidx3.2, align 4
-  %inc.2 = or i32 %i.010, 3
+  %inc.2 = or disjoint i32 %i.010, 3
   %arrayidx.3 = getelementptr inbounds i8, ptr %A, i32 %inc.2
   %tmp10 = load i8, ptr %arrayidx.3, align 1
   %conv.3 = zext i8 %tmp10 to i32
@@ -356,7 +356,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = mul nsw i32 %conv2, %conv
   %arrayidx3 = getelementptr inbounds i32, ptr %C, i32 %i.010
   store i32 %mul, ptr %arrayidx3, align 4
-  %inc = or i32 %i.010, 1
+  %inc = or disjoint i32 %i.010, 1
   %arrayidx.1 = getelementptr inbounds i16, ptr %A, i32 %inc
   %tmp6 = load i16, ptr %arrayidx.1, align 2
   %conv.1 = sext i16 %tmp6 to i32
@@ -366,7 +366,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = mul nsw i32 %conv2.1, %conv.1
   %arrayidx3.1 = getelementptr inbounds i32, ptr %C, i32 %inc
   store i32 %mul.1, ptr %arrayidx3.1, align 4
-  %inc.1 = or i32 %i.010, 2
+  %inc.1 = or disjoint i32 %i.010, 2
   %arrayidx.2 = getelementptr inbounds i16, ptr %A, i32 %inc.1
   %tmp8 = load i16, ptr %arrayidx.2, align 2
   %conv.2 = sext i16 %tmp8 to i32
@@ -376,7 +376,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = mul nsw i32 %conv2.2, %conv.2
   %arrayidx3.2 = getelementptr inbounds i32, ptr %C, i32 %inc.1
   store i32 %mul.2, ptr %arrayidx3.2, align 4
-  %inc.2 = or i32 %i.010, 3
+  %inc.2 = or disjoint i32 %i.010, 3
   %arrayidx.3 = getelementptr inbounds i16, ptr %A, i32 %inc.2
   %tmp10 = load i16, ptr %arrayidx.3, align 2
   %conv.3 = sext i16 %tmp10 to i32
@@ -464,7 +464,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = mul nsw i32 %conv2, %conv
   %arrayidx3 = getelementptr inbounds i32, ptr %C, i32 %i.010
   store i32 %mul, ptr %arrayidx3, align 4
-  %inc = or i32 %i.010, 1
+  %inc = or disjoint i32 %i.010, 1
   %arrayidx.1 = getelementptr inbounds i16, ptr %A, i32 %inc
   %tmp6 = load i16, ptr %arrayidx.1, align 2
   %conv.1 = sext i16 %tmp6 to i32
@@ -474,7 +474,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = mul nsw i32 %conv2.1, %conv.1
   %arrayidx3.1 = getelementptr inbounds i32, ptr %C, i32 %inc
   store i32 %mul.1, ptr %arrayidx3.1, align 4
-  %inc.1 = or i32 %i.010, 2
+  %inc.1 = or disjoint i32 %i.010, 2
   %arrayidx.2 = getelementptr inbounds i16, ptr %A, i32 %inc.1
   %tmp8 = load i16, ptr %arrayidx.2, align 2
   %conv.2 = sext i16 %tmp8 to i32
@@ -484,7 +484,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = mul nsw i32 %conv2.2, %conv.2
   %arrayidx3.2 = getelementptr inbounds i32, ptr %C, i32 %inc.1
   store i32 %mul.2, ptr %arrayidx3.2, align 4
-  %inc.2 = or i32 %i.010, 3
+  %inc.2 = or disjoint i32 %i.010, 3
   %arrayidx.3 = getelementptr inbounds i16, ptr %A, i32 %inc.2
   %tmp10 = load i16, ptr %arrayidx.3, align 2
   %conv.3 = sext i16 %tmp10 to i32
@@ -550,7 +550,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp4 = load i32, ptr %arrayidx9.us, align 4
   %add.us = add nsw i32 %tmp4, %mul.us
   store i32 %add.us, ptr %arrayidx9.us, align 4
-  %inc.us = or i32 %j.023.us, 1
+  %inc.us = or disjoint i32 %j.023.us, 1
   %tmp5 = load i8, ptr %arrayidx.us, align 1
   %conv.us.1 = zext i8 %tmp5 to i32
   %arrayidx6.us.1 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us
@@ -561,7 +561,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp7 = load i32, ptr %arrayidx9.us.1, align 4
   %add.us.1 = add nsw i32 %tmp7, %mul.us.1
   store i32 %add.us.1, ptr %arrayidx9.us.1, align 4
-  %inc.us.1 = or i32 %j.023.us, 2
+  %inc.us.1 = or disjoint i32 %j.023.us, 2
   %tmp8 = load i8, ptr %arrayidx.us, align 1
   %conv.us.2 = zext i8 %tmp8 to i32
   %arrayidx6.us.2 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us.1
@@ -572,7 +572,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp10 = load i32, ptr %arrayidx9.us.2, align 4
   %add.us.2 = add nsw i32 %tmp10, %mul.us.2
   store i32 %add.us.2, ptr %arrayidx9.us.2, align 4
-  %inc.us.2 = or i32 %j.023.us, 3
+  %inc.us.2 = or disjoint i32 %j.023.us, 3
   %tmp11 = load i8, ptr %arrayidx.us, align 1
   %conv.us.3 = zext i8 %tmp11 to i32
   %arrayidx6.us.3 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us.2
@@ -669,7 +669,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp6 = load i32, ptr %arrayidx9.us, align 4
   %add.us = add nsw i32 %tmp6, %mul.us
   store i32 %add.us, ptr %arrayidx9.us, align 4
-  %inc.us = or i32 %j.023.us, 1
+  %inc.us = or disjoint i32 %j.023.us, 1
   %arrayidx6.us.1 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us
   %tmp7 = load i16, ptr %arrayidx6.us.1, align 2
   %conv7.us.1 = sext i16 %tmp7 to i32
@@ -678,7 +678,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp8 = load i32, ptr %arrayidx9.us.1, align 4
   %add.us.1 = add nsw i32 %tmp8, %mul.us.1
   store i32 %add.us.1, ptr %arrayidx9.us.1, align 4
-  %inc.us.1 = or i32 %j.023.us, 2
+  %inc.us.1 = or disjoint i32 %j.023.us, 2
   %arrayidx6.us.2 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us.1
   %tmp9 = load i16, ptr %arrayidx6.us.2, align 2
   %conv7.us.2 = sext i16 %tmp9 to i32
@@ -687,7 +687,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %tmp10 = load i32, ptr %arrayidx9.us.2, align 4
   %add.us.2 = add nsw i32 %tmp10, %mul.us.2
   store i32 %add.us.2, ptr %arrayidx9.us.2, align 4
-  %inc.us.2 = or i32 %j.023.us, 3
+  %inc.us.2 = or disjoint i32 %j.023.us, 3
   %arrayidx6.us.3 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us.2
   %tmp11 = load i16, ptr %arrayidx6.us.3, align 2
   %conv7.us.3 = sext i16 %tmp11 to i32
@@ -785,7 +785,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %mul.us = mul nuw nsw i32 %conv7.us, %conv.us
   %add.us = add nsw i32 %mul.us, %tmp2
   store i32 %add.us, ptr %arrayidx8.us, align 4
-  %inc.us = or i32 %j.021.us, 1
+  %inc.us = or disjoint i32 %j.021.us, 1
   %tmp5 = load i8, ptr %arrayidx.us, align 1
   %conv.us.1 = zext i8 %tmp5 to i32
   %arrayidx6.us.1 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us
@@ -794,7 +794,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %mul.us.1 = mul nuw nsw i32 %conv7.us.1, %conv.us.1
   %add.us.1 = add nsw i32 %mul.us.1, %add.us
   store i32 %add.us.1, ptr %arrayidx8.us, align 4
-  %inc.us.1 = or i32 %j.021.us, 2
+  %inc.us.1 = or disjoint i32 %j.021.us, 2
   %tmp7 = load i8, ptr %arrayidx.us, align 1
   %conv.us.2 = zext i8 %tmp7 to i32
   %arrayidx6.us.2 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us.1
@@ -803,7 +803,7 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %mul.us.2 = mul nuw nsw i32 %conv7.us.2, %conv.us.2
   %add.us.2 = add nsw i32 %mul.us.2, %add.us.1
   store i32 %add.us.2, ptr %arrayidx8.us, align 4
-  %inc.us.2 = or i32 %j.021.us, 3
+  %inc.us.2 = or disjoint i32 %j.021.us, 3
   %tmp9 = load i8, ptr %arrayidx.us, align 1
   %conv.us.3 = zext i8 %tmp9 to i32
   %arrayidx6.us.3 = getelementptr inbounds i8, ptr %.pre, i32 %inc.us.2
@@ -898,19 +898,19 @@ for.body4.us:                                     ; preds = %for.body4.us, %for.
   %conv7.us = sext i16 %tmp4 to i32
   %mul.us = mul nsw i32 %conv7.us, %conv.us
   %add.us = add nsw i32 %mul.us, %add22.us
-  %inc.us = or i32 %j.021.us, 1
+  %inc.us = or disjoint i32 %j.021.us, 1
   %arrayidx6.us.1 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us
   %tmp5 = load i16, ptr %arrayidx6.us.1, align 2
   %conv7.us.1 = sext i16 %tmp5 to i32
   %mul.us.1 = mul nsw i32 %conv7.us.1, %conv.us
   %add.us.1 = add nsw i32 %mul.us.1, %add.us
-  %inc.us.1 = or i32 %j.021.us, 2
+  %inc.us.1 = or disjoint i32 %j.021.us, 2
   %arrayidx6.us.2 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us.1
   %tmp6 = load i16, ptr %arrayidx6.us.2, align 2
   %conv7.us.2 = sext i16 %tmp6 to i32
   %mul.us.2 = mul nsw i32 %conv7.us.2, %conv.us
   %add.us.2 = add nsw i32 %mul.us.2, %add.us.1
-  %inc.us.2 = or i32 %j.021.us, 3
+  %inc.us.2 = or disjoint i32 %j.021.us, 3
   %arrayidx6.us.3 = getelementptr inbounds i16, ptr %tmp3, i32 %inc.us.2
   %tmp7 = load i16, ptr %arrayidx6.us.3, align 2
   %conv7.us.3 = sext i16 %tmp7 to i32
@@ -1100,7 +1100,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = mul nsw i32 %tmp5, %tmp4
   %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 %i.09
   store i32 %mul, ptr %arrayidx2, align 4
-  %inc = or i32 %i.09, 1
+  %inc = or disjoint i32 %i.09, 1
   %arrayidx.1 = getelementptr inbounds i32, ptr %b, i32 %inc
   %tmp6 = load i32, ptr %arrayidx.1, align 4
   %arrayidx1.1 = getelementptr inbounds i32, ptr %c, i32 %inc
@@ -1108,7 +1108,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = mul nsw i32 %tmp7, %tmp6
   %arrayidx2.1 = getelementptr inbounds i32, ptr %a, i32 %inc
   store i32 %mul.1, ptr %arrayidx2.1, align 4
-  %inc.1 = or i32 %i.09, 2
+  %inc.1 = or disjoint i32 %i.09, 2
   %arrayidx.2 = getelementptr inbounds i32, ptr %b, i32 %inc.1
   %tmp8 = load i32, ptr %arrayidx.2, align 4
   %arrayidx1.2 = getelementptr inbounds i32, ptr %c, i32 %inc.1
@@ -1116,7 +1116,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = mul nsw i32 %tmp9, %tmp8
   %arrayidx2.2 = getelementptr inbounds i32, ptr %a, i32 %inc.1
   store i32 %mul.2, ptr %arrayidx2.2, align 4
-  %inc.2 = or i32 %i.09, 3
+  %inc.2 = or disjoint i32 %i.09, 3
   %arrayidx.3 = getelementptr inbounds i32, ptr %b, i32 %inc.2
   %tmp10 = load i32, ptr %arrayidx.3, align 4
   %arrayidx1.3 = getelementptr inbounds i32, ptr %c, i32 %inc.2

diff --git a/llvm/test/CodeGen/ARM/shifter_operand.ll b/llvm/test/CodeGen/ARM/shifter_operand.ll
index f62f195e1d735..bf2e8aa911c64 100644
--- a/llvm/test/CodeGen/ARM/shifter_operand.ll
+++ b/llvm/test/CodeGen/ARM/shifter_operand.ll
@@ -508,14 +508,14 @@ for.cond:                                         ; preds = %for.cond1.for.cond.
 
 for.cond2.preheader:                              ; preds = %for.cond2.preheader.2, %for.cond
   %indvar24 = phi i32 [ 0, %for.cond ], [ %indvar.next25.3, %for.cond2.preheader.2 ]
-  %indvar.next25 = or i32 %indvar24, 1
+  %indvar.next25 = or disjoint i32 %indvar24, 1
   %l5 = mul i32 %2, %indvar.next25
   %scevgep.1 = getelementptr [15 x [25 x [18 x i8]]], ptr @arr_9, i32 -217196, i32 %4, i32 %0, i32 %l5
   store i32 0, ptr %scevgep.1, align 1
   br i1 %cc, label %for.cond1.for.cond.cleanup_crit_edge, label %for.cond2.preheader.2
 
 for.cond2.preheader.2:                            ; preds = %for.cond2.preheader
-  %indvar.next25.1 = or i32 %indvar24, 2
+  %indvar.next25.1 = or disjoint i32 %indvar24, 2
   %l8 = mul i32 %2, %indvar.next25.1
   %scevgep.2 = getelementptr [15 x [25 x [18 x i8]]], ptr @arr_9, i32 -217196, i32 %4, i32 %0, i32 %l8
   store i32 0, ptr %scevgep.2, align 1

diff --git a/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll b/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
index 0367721f6a670..a4b24a1a61100 100644
--- a/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
+++ b/llvm/test/CodeGen/Hexagon/autohvx/interleave.ll
@@ -27,7 +27,7 @@ b4:                                               ; preds = %b4, %b1
   %v5 = load i32, ptr %v4, align 4, !tbaa !1
   %v6 = add nsw i32 %v5, %v3
   store i32 %v6, ptr %v4, align 4, !tbaa !1
-  %v7 = or i32 %v1, 1
+  %v7 = or disjoint i32 %v1, 1
   %v8 = getelementptr inbounds i32, ptr %a1, i32 %v7
   %v9 = load i32, ptr %v8, align 4, !tbaa !1
   %v10 = getelementptr inbounds i32, ptr %a0, i32 %v7

diff --git a/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll b/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
index 82a50d6ac1746..33356e922fbae 100644
--- a/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
+++ b/llvm/test/CodeGen/PowerPC/rematerializable-instruction-machine-licm.ll
@@ -460,13 +460,13 @@ define zeroext i32 @test1(i64 %0, ptr %1) {
   %23 = phi i64 [ 0, %20 ], [ %107, %22 ]
   %24 = mul i64 %23, 400000
   %25 = getelementptr i64, ptr %3, i64 %24
-  %26 = or i64 %23, 1
+  %26 = or disjoint i64 %23, 1
   %27 = mul i64 %26, 400000
   %28 = getelementptr i64, ptr %3, i64 %27
-  %29 = or i64 %23, 2
+  %29 = or disjoint i64 %23, 2
   %30 = mul i64 %29, 400000
   %31 = getelementptr i64, ptr %3, i64 %30
-  %32 = or i64 %23, 3
+  %32 = or disjoint i64 %23, 3
   %33 = mul i64 %32, 400000
   %34 = getelementptr i64, ptr %3, i64 %33
   %35 = mul i64 %23, 400000

diff --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
index 78f2ca8f128d4..e73249739f356 100644
--- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
@@ -118,7 +118,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %2 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %2, 4
   store i32 %add, ptr %arrayidx, align 4
-  %indvars.iv.next = or i64 %indvars.iv, 1
+  %indvars.iv.next = or disjoint i64 %indvars.iv, 1
   %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
   %3 = load i32, ptr %arrayidx.1, align 4
   %add.1 = add nsw i32 %3, 4

diff --git a/llvm/test/CodeGen/SystemZ/vec-load-element.ll b/llvm/test/CodeGen/SystemZ/vec-load-element.ll
index 25d29127ebf07..2baaed19546df 100644
--- a/llvm/test/CodeGen/SystemZ/vec-load-element.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-load-element.ll
@@ -18,8 +18,8 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %57, %vector.body ]
-  %0 = or i64 %index, 2
-  %1 = or i64 %index, 3
+  %0 = or disjoint i64 %index, 2
+  %1 = or disjoint i64 %index, 3
   %2 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 0, i32 3
   %3 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %0, i32 3
   %4 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %1, i32 3
@@ -31,7 +31,7 @@ vector.body:                                      ; preds = %vector.body, %entry
   %10 = insertelement <4 x i32> %9, i32 %6, i32 2
   %11 = insertelement <4 x i32> %10, i32 %7, i32 3
   %12 = add nsw <4 x i32> %11, %vec.phi
-  %13 = or i64 %index, 7
+  %13 = or disjoint i64 %index, 7
   %14 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 undef, i32 3
   %15 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 0, i32 3
   %16 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %13, i32 3
@@ -44,9 +44,9 @@ vector.body:                                      ; preds = %vector.body, %entry
   %23 = insertelement <4 x i32> %22, i32 %19, i32 2
   %24 = insertelement <4 x i32> %23, i32 %20, i32 3
   %25 = add nsw <4 x i32> %24, %12
-  %26 = or i64 %index, 9
-  %27 = or i64 %index, 10
-  %28 = or i64 %index, 11
+  %26 = or disjoint i64 %index, 9
+  %27 = or disjoint i64 %index, 10
+  %28 = or disjoint i64 %index, 11
   %29 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 undef, i32 3
   %30 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %26, i32 3
   %31 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %27, i32 3
@@ -60,9 +60,9 @@ vector.body:                                      ; preds = %vector.body, %entry
   %39 = insertelement <4 x i32> %38, i32 %35, i32 2
   %40 = insertelement <4 x i32> %39, i32 %36, i32 3
   %41 = add nsw <4 x i32> %40, %25
-  %42 = or i64 %index, 13
-  %43 = or i64 %index, 14
-  %44 = or i64 %index, 15
+  %42 = or disjoint i64 %index, 13
+  %43 = or disjoint i64 %index, 14
+  %44 = or disjoint i64 %index, 15
   %45 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 undef, i32 3
   %46 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %42, i32 3
   %47 = getelementptr inbounds [150 x %type0], ptr @Mem, i64 0, i64 %43, i32 3

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
index 9933720953d33..2fdf534d52656 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/fast-fp-loops.ll
@@ -166,7 +166,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = fmul fast float %i14, %i13
   %arrayidx2 = getelementptr inbounds float, ptr %a, i32 %i.09
   store float %mul, ptr %arrayidx2, align 4
-  %inc = or i32 %i.09, 1
+  %inc = or disjoint i32 %i.09, 1
   %arrayidx.1 = getelementptr inbounds float, ptr %b, i32 %inc
   %i15 = load float, ptr %arrayidx.1, align 4
   %arrayidx1.1 = getelementptr inbounds float, ptr %c, i32 %inc
@@ -174,7 +174,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = fmul fast float %i16, %i15
   %arrayidx2.1 = getelementptr inbounds float, ptr %a, i32 %inc
   store float %mul.1, ptr %arrayidx2.1, align 4
-  %inc.1 = or i32 %i.09, 2
+  %inc.1 = or disjoint i32 %i.09, 2
   %arrayidx.2 = getelementptr inbounds float, ptr %b, i32 %inc.1
   %i17 = load float, ptr %arrayidx.2, align 4
   %arrayidx1.2 = getelementptr inbounds float, ptr %c, i32 %inc.1
@@ -182,7 +182,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = fmul fast float %i18, %i17
   %arrayidx2.2 = getelementptr inbounds float, ptr %a, i32 %inc.1
   store float %mul.2, ptr %arrayidx2.2, align 4
-  %inc.2 = or i32 %i.09, 3
+  %inc.2 = or disjoint i32 %i.09, 3
   %arrayidx.3 = getelementptr inbounds float, ptr %b, i32 %inc.2
   %i19 = load float, ptr %arrayidx.3, align 4
   %arrayidx1.3 = getelementptr inbounds float, ptr %c, i32 %inc.2

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
index cc6d0925d1803..8b71987246ee5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-float-loops.ll
@@ -1526,7 +1526,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = fmul half %4, %5
   %conv = fpext half %mul to float
   %add = fadd float %res.09, %conv
-  %inc = or i32 %i.010, 1
+  %inc = or disjoint i32 %i.010, 1
   %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
   %6 = load half, ptr %arrayidx.1, align 2
   %arrayidx1.1 = getelementptr inbounds half, ptr %b, i32 %inc
@@ -1534,7 +1534,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = fmul half %6, %7
   %conv.1 = fpext half %mul.1 to float
   %add.1 = fadd float %add, %conv.1
-  %inc.1 = or i32 %i.010, 2
+  %inc.1 = or disjoint i32 %i.010, 2
   %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
   %8 = load half, ptr %arrayidx.2, align 2
   %arrayidx1.2 = getelementptr inbounds half, ptr %b, i32 %inc.1
@@ -1542,7 +1542,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = fmul half %8, %9
   %conv.2 = fpext half %mul.2 to float
   %add.2 = fadd float %add.1, %conv.2
-  %inc.2 = or i32 %i.010, 3
+  %inc.2 = or disjoint i32 %i.010, 3
   %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
   %10 = load half, ptr %arrayidx.3, align 2
   %arrayidx1.3 = getelementptr inbounds half, ptr %b, i32 %inc.2
@@ -1681,7 +1681,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add = fadd half %4, %5
   %conv = fpext half %add to float
   %add2 = fadd float %res.010, %conv
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
   %6 = load half, ptr %arrayidx.1, align 2
   %arrayidx1.1 = getelementptr inbounds half, ptr %b, i32 %inc
@@ -1689,7 +1689,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.1 = fadd half %6, %7
   %conv.1 = fpext half %add.1 to float
   %add2.1 = fadd float %add2, %conv.1
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
   %8 = load half, ptr %arrayidx.2, align 2
   %arrayidx1.2 = getelementptr inbounds half, ptr %b, i32 %inc.1
@@ -1697,7 +1697,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.2 = fadd half %8, %9
   %conv.2 = fpext half %add.2 to float
   %add2.2 = fadd float %add2.1, %conv.2
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
   %10 = load half, ptr %arrayidx.3, align 2
   %arrayidx1.3 = getelementptr inbounds half, ptr %b, i32 %inc.2
@@ -1846,7 +1846,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul = fmul half %4, %conv2
   %conv3 = fpext half %mul to float
   %add = fadd float %res.011, %conv3
-  %inc = or i32 %i.012, 1
+  %inc = or disjoint i32 %i.012, 1
   %arrayidx.1 = getelementptr inbounds half, ptr %a, i32 %inc
   %6 = load half, ptr %arrayidx.1, align 2
   %arrayidx1.1 = getelementptr inbounds i16, ptr %b, i32 %inc
@@ -1855,7 +1855,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.1 = fmul half %6, %conv2.1
   %conv3.1 = fpext half %mul.1 to float
   %add.1 = fadd float %add, %conv3.1
-  %inc.1 = or i32 %i.012, 2
+  %inc.1 = or disjoint i32 %i.012, 2
   %arrayidx.2 = getelementptr inbounds half, ptr %a, i32 %inc.1
   %8 = load half, ptr %arrayidx.2, align 2
   %arrayidx1.2 = getelementptr inbounds i16, ptr %b, i32 %inc.1
@@ -1864,7 +1864,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %mul.2 = fmul half %8, %conv2.2
   %conv3.2 = fpext half %mul.2 to float
   %add.2 = fadd float %add.1, %conv3.2
-  %inc.2 = or i32 %i.012, 3
+  %inc.2 = or disjoint i32 %i.012, 3
   %arrayidx.3 = getelementptr inbounds half, ptr %a, i32 %inc.2
   %10 = load half, ptr %arrayidx.3, align 2
   %arrayidx1.3 = getelementptr inbounds i16, ptr %b, i32 %inc.2

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
index d41d7d2c1a51d..cfa6eb3aca820 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
@@ -506,7 +506,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add = add nuw nsw i32 %mul, %conv3
   %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
   store i32 %add, i32* %arrayidx4, align 4
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
   %18 = load i8, i8* %arrayidx.1, align 1
   %conv.1 = zext i8 %18 to i32
@@ -517,7 +517,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.1 = add nuw nsw i32 %mul.1, %conv3
   %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
   store i32 %add.1, i32* %arrayidx4.1, align 4
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
   %20 = load i8, i8* %arrayidx.2, align 1
   %conv.2 = zext i8 %20 to i32
@@ -528,7 +528,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.2 = add nuw nsw i32 %mul.2, %conv3
   %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
   store i32 %add.2, i32* %arrayidx4.2, align 4
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
   %22 = load i8, i8* %arrayidx.3, align 1
   %conv.3 = zext i8 %22 to i32
@@ -782,7 +782,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add = add nuw nsw i32 %mul, %conv3
   %arrayidx4 = getelementptr inbounds i32, i32* %res, i32 %i.011
   store i32 %add, i32* %arrayidx4, align 4
-  %inc = or i32 %i.011, 1
+  %inc = or disjoint i32 %i.011, 1
   %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc
   %18 = load i8, i8* %arrayidx.1, align 1
   %conv.1 = zext i8 %18 to i32
@@ -793,7 +793,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.1 = add nuw nsw i32 %mul.1, %conv3
   %arrayidx4.1 = getelementptr inbounds i32, i32* %res, i32 %inc
   store i32 %add.1, i32* %arrayidx4.1, align 4
-  %inc.1 = or i32 %i.011, 2
+  %inc.1 = or disjoint i32 %i.011, 2
   %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.1
   %20 = load i8, i8* %arrayidx.2, align 1
   %conv.2 = zext i8 %20 to i32
@@ -804,7 +804,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.2 = add nuw nsw i32 %mul.2, %conv3
   %arrayidx4.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
   store i32 %add.2, i32* %arrayidx4.2, align 4
-  %inc.2 = or i32 %i.011, 3
+  %inc.2 = or disjoint i32 %i.011, 3
   %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.2
   %22 = load i8, i8* %arrayidx.3, align 1
   %conv.3 = zext i8 %22 to i32
@@ -1051,7 +1051,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add = add nsw i32 %mul, %c
   %arrayidx2 = getelementptr inbounds i32, i32* %res, i32 %i.09
   store i32 %add, i32* %arrayidx2, align 4
-  %inc = or i32 %i.09, 1
+  %inc = or disjoint i32 %i.09, 1
   %arrayidx.1 = getelementptr inbounds i32, i32* %a, i32 %inc
   %16 = load i32, i32* %arrayidx.1, align 4
   %arrayidx1.1 = getelementptr inbounds i32, i32* %b, i32 %inc
@@ -1060,7 +1060,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.1 = add nsw i32 %mul.1, %c
   %arrayidx2.1 = getelementptr inbounds i32, i32* %res, i32 %inc
   store i32 %add.1, i32* %arrayidx2.1, align 4
-  %inc.1 = or i32 %i.09, 2
+  %inc.1 = or disjoint i32 %i.09, 2
   %arrayidx.2 = getelementptr inbounds i32, i32* %a, i32 %inc.1
   %18 = load i32, i32* %arrayidx.2, align 4
   %arrayidx1.2 = getelementptr inbounds i32, i32* %b, i32 %inc.1
@@ -1069,7 +1069,7 @@ for.body:                                         ; preds = %for.body, %for.body
   %add.2 = add nsw i32 %mul.2, %c
   %arrayidx2.2 = getelementptr inbounds i32, i32* %res, i32 %inc.1
   store i32 %add.2, i32* %arrayidx2.2, align 4
-  %inc.2 = or i32 %i.09, 3
+  %inc.2 = or disjoint i32 %i.09, 3
   %arrayidx.3 = getelementptr inbounds i32, i32* %a, i32 %inc.2
   %20 = load i32, i32* %arrayidx.3, align 4
   %arrayidx1.3 = getelementptr inbounds i32, i32* %b, i32 %inc.2

diff --git a/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll b/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
index e22fd4cabfa52..cc7b5a7f3f819 100644
--- a/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-blockplacement.ll
@@ -213,7 +213,7 @@ cond.end22:                                       ; preds = %for.body10, %cond.t
   %tobool24 = icmp ne i32 %cond23, 0
   %frombool = zext i1 %tobool24 to i8
   store i8 %frombool, ptr @var_36, align 1
-  %add = or i32 %i_15.044, 1
+  %add = or disjoint i32 %i_15.044, 1
   %arraydecay.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add, i32 0
   %2 = ptrtoint ptr %arraydecay.1 to i32
   %arrayidx13.1 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add
@@ -258,7 +258,7 @@ cond.end22.1119:                                  ; preds = %cond.true19.1114, %
   %tobool24.1116 = icmp ne i32 %cond23.1115, 0
   %frombool.1117 = zext i1 %tobool24.1116 to i8
   store i8 %frombool.1117, ptr @var_36, align 1
-  %add.1118 = or i32 %i_15.044.1, 1
+  %add.1118 = or disjoint i32 %i_15.044.1, 1
   %arraydecay.1.1 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add.1118, i32 0
   %6 = ptrtoint ptr %arraydecay.1.1 to i32
   %arrayidx13.1.1 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add.1118
@@ -308,7 +308,7 @@ cond.end22.2:                                     ; preds = %cond.true19.2, %for
   %tobool24.2 = icmp ne i32 %cond23.2, 0
   %frombool.2 = zext i1 %tobool24.2 to i8
   store i8 %frombool.2, ptr @var_36, align 1
-  %add.2 = or i32 %i_15.044.2, 1
+  %add.2 = or disjoint i32 %i_15.044.2, 1
   %arraydecay.1.2 = getelementptr inbounds [18 x [22 x i8]], ptr %arr_60, i32 %add.2, i32 0
   %10 = ptrtoint ptr %arraydecay.1.2 to i32
   %arrayidx13.1.2 = getelementptr inbounds [1 x i32], ptr @arr_61, i32 0, i32 %add.2

diff --git a/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll b/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
index 750947dc26186..6b2a6e63af23b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-tailpred-loopinvariant.ll
@@ -43,35 +43,35 @@ while.body.preheader:                             ; preds = %entry
 vector.body:                                      ; preds = %vector.body, %while.body.preheader
   %index = phi i32 [ 0, %while.body.preheader ], [ %index.next, %vector.body ]
   %next.gep = getelementptr i8, ptr %c, i32 %index
-  %1 = or i32 %index, 1
+  %1 = or disjoint i32 %index, 1
   %next.gep7 = getelementptr i8, ptr %c, i32 %1
-  %2 = or i32 %index, 2
+  %2 = or disjoint i32 %index, 2
   %next.gep8 = getelementptr i8, ptr %c, i32 %2
-  %3 = or i32 %index, 3
+  %3 = or disjoint i32 %index, 3
   %next.gep9 = getelementptr i8, ptr %c, i32 %3
-  %4 = or i32 %index, 4
+  %4 = or disjoint i32 %index, 4
   %next.gep10 = getelementptr i8, ptr %c, i32 %4
-  %5 = or i32 %index, 5
+  %5 = or disjoint i32 %index, 5
   %next.gep11 = getelementptr i8, ptr %c, i32 %5
-  %6 = or i32 %index, 6
+  %6 = or disjoint i32 %index, 6
   %next.gep12 = getelementptr i8, ptr %c, i32 %6
-  %7 = or i32 %index, 7
+  %7 = or disjoint i32 %index, 7
   %next.gep13 = getelementptr i8, ptr %c, i32 %7
-  %8 = or i32 %index, 8
+  %8 = or disjoint i32 %index, 8
   %next.gep14 = getelementptr i8, ptr %c, i32 %8
-  %9 = or i32 %index, 9
+  %9 = or disjoint i32 %index, 9
   %next.gep15 = getelementptr i8, ptr %c, i32 %9
-  %10 = or i32 %index, 10
+  %10 = or disjoint i32 %index, 10
   %next.gep16 = getelementptr i8, ptr %c, i32 %10
-  %11 = or i32 %index, 11
+  %11 = or disjoint i32 %index, 11
   %next.gep17 = getelementptr i8, ptr %c, i32 %11
-  %12 = or i32 %index, 12
+  %12 = or disjoint i32 %index, 12
   %next.gep18 = getelementptr i8, ptr %c, i32 %12
-  %13 = or i32 %index, 13
+  %13 = or disjoint i32 %index, 13
   %next.gep19 = getelementptr i8, ptr %c, i32 %13
-  %14 = or i32 %index, 14
+  %14 = or disjoint i32 %index, 14
   %next.gep20 = getelementptr i8, ptr %c, i32 %14
-  %15 = or i32 %index, 15
+  %15 = or disjoint i32 %index, 15
   %next.gep21 = getelementptr i8, ptr %c, i32 %15
   %16 = insertelement <16 x ptr> poison, ptr %next.gep, i32 0
   %17 = insertelement <16 x ptr> %16, ptr %next.gep7, i32 1

diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
index aa556b4418f58..853a56d41af88 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
@@ -55,15 +55,15 @@ for.body:                                         ; preds = %for.body, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %vla, i32 %i.010
   %3 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %3, %s.09
-  %inc = or i32 %i.010, 1
+  %inc = or disjoint i32 %i.010, 1
   %arrayidx.1 = getelementptr inbounds i32, ptr %vla, i32 %inc
   %4 = load i32, ptr %arrayidx.1, align 4
   %add.1 = add nsw i32 %4, %add
-  %inc.1 = or i32 %i.010, 2
+  %inc.1 = or disjoint i32 %i.010, 2
   %arrayidx.2 = getelementptr inbounds i32, ptr %vla, i32 %inc.1
   %5 = load i32, ptr %arrayidx.2, align 4
   %add.2 = add nsw i32 %5, %add.1
-  %inc.2 = or i32 %i.010, 3
+  %inc.2 = or disjoint i32 %i.010, 3
   %arrayidx.3 = getelementptr inbounds i32, ptr %vla, i32 %inc.2
   %6 = load i32, ptr %arrayidx.3, align 4
   %add.3 = add nsw i32 %6, %add.2

diff --git a/llvm/test/CodeGen/WebAssembly/unrolled-mem-indices.ll b/llvm/test/CodeGen/WebAssembly/unrolled-mem-indices.ll
index 5178f1ec0c155..a232853688cb1 100644
--- a/llvm/test/CodeGen/WebAssembly/unrolled-mem-indices.ll
+++ b/llvm/test/CodeGen/WebAssembly/unrolled-mem-indices.ll
@@ -70,7 +70,7 @@ bb4:                                              ; preds = %bb4, %bb
   %i11 = add nsw i32 %i10, %i7
   %i12 = getelementptr inbounds i32, ptr %arg2, i32 %i
   store i32 %i11, ptr %i12, align 4
-  %i13 = or i32 %i, 1
+  %i13 = or disjoint i32 %i, 1
   %i14 = getelementptr inbounds i16, ptr %arg, i32 %i13
   %i15 = load i16, ptr %i14, align 2
   %i16 = sext i16 %i15 to i32
@@ -152,7 +152,7 @@ bb4:                                              ; preds = %bb4, %bb
   %i11 = add nsw i32 %i10, %i7
   %i12 = getelementptr inbounds i32, ptr %arg2, i32 %i
   store i32 %i11, ptr %i12, align 4
-  %i13 = or i32 %i, 1
+  %i13 = or disjoint i32 %i, 1
   %i14 = getelementptr i16, ptr %arg, i32 %i13
   %i15 = load i16, ptr %i14, align 2
   %i16 = sext i16 %i15 to i32
@@ -303,7 +303,7 @@ bb14:                                             ; preds = %bb14, %bb3
   %i22 = sext i16 %i21 to i32
   %i23 = add nsw i32 %i22, %i19
   %i24 = add nsw i32 %i23, %i16
-  %i25 = or i32 %i15, 1
+  %i25 = or disjoint i32 %i15, 1
   %i26 = getelementptr inbounds i16, ptr %i5, i32 %i25
   %i27 = load i16, ptr %i26, align 2
   %i28 = sext i16 %i27 to i32
@@ -312,7 +312,7 @@ bb14:                                             ; preds = %bb14, %bb3
   %i31 = sext i16 %i30 to i32
   %i32 = add nsw i32 %i31, %i28
   %i33 = add nsw i32 %i32, %i24
-  %i34 = or i32 %i15, 2
+  %i34 = or disjoint i32 %i15, 2
   %i35 = getelementptr inbounds i16, ptr %i5, i32 %i34
   %i36 = load i16, ptr %i35, align 2
   %i37 = sext i16 %i36 to i32
@@ -321,7 +321,7 @@ bb14:                                             ; preds = %bb14, %bb3
   %i40 = sext i16 %i39 to i32
   %i41 = add nsw i32 %i40, %i37
   %i42 = add nsw i32 %i41, %i33
-  %i43 = or i32 %i15, 3
+  %i43 = or disjoint i32 %i15, 3
   %i44 = getelementptr inbounds i16, ptr %i5, i32 %i43
   %i45 = load i16, ptr %i44, align 2
   %i46 = sext i16 %i45 to i32
@@ -478,7 +478,7 @@ bb20:                                             ; preds = %bb20, %bb7
   %i27 = fadd float %i24, %i26
   %i28 = getelementptr inbounds float, ptr %arg2, i32 %i21
   store float %i27, ptr %i28, align 4
-  %i29 = or i32 %i21, 1
+  %i29 = or disjoint i32 %i21, 1
   %i30 = getelementptr inbounds float, ptr %arg, i32 %i29
   %i31 = load float, ptr %i30, align 4
   %i32 = getelementptr inbounds float, ptr %arg1, i32 %i29

diff --git a/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll b/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
index dfe1662492a32..ca92c555058ab 100644
--- a/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
+++ b/llvm/test/CodeGen/X86/2008-08-06-CmpStride.ll
@@ -31,7 +31,7 @@ forbody:
         %i.0 = phi i32 [ 0, %entry ], [ %inc, %forbody ]                ; <i32>[#uses=3]
         %sub14 = sub i32 1027, %i.0             ; <i32> [#uses=1]
         %mul15 = mul i32 %sub14, 10             ; <i32> [#uses=1]
-        %add166 = or i32 %mul15, 1              ; <i32> ptr
+        %add166 = or disjoint i32 %mul15, 1              ; <i32> ptr
         call i32 (ptr, ...) @printf( ptr noalias  @.str, i32 %add166 ) nounwind
         %inc = add i32 %i.0, 1          ; <i32> [#uses=3]
         %cmp = icmp ne i32 %inc, 1027          ; <i1> [#uses=1]

diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index ecf38980573d6..78a0a849f13ea 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -979,7 +979,7 @@ define void @MergeLoadStoreBaseIndexOffsetComplicated(i8* %a, i8* %b, i8* %c, i6
   %8 = load i8, i8* %7, align 1
   %9 = getelementptr inbounds i8, i8* %a, i64 %.09
   store i8 %5, i8* %9, align 1
-  %10 = or i64 %.09, 1
+  %10 = or disjoint i64 %.09, 1
   %11 = getelementptr inbounds i8, i8* %a, i64 %10
   store i8 %8, i8* %11, align 1
   %12 = getelementptr inbounds i8, i8* %.08, i64 1

diff --git a/llvm/test/CodeGen/X86/avx512vnni-combine.ll b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
index f0c8a7e208326..c491a952682d5 100644
--- a/llvm/test/CodeGen/X86/avx512vnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
@@ -135,17 +135,17 @@ define <8 x i64> @foo_512(i32 %0, <8 x i64> %1, <8 x i64> %2, ptr %3) {
   %40 = load <32 x i16>, ptr %39, align 64
   %41 = tail call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %7, <32 x i16> %40)
   %42 = add <16 x i32> %41, %37
-  %43 = or i64 %36, 1
+  %43 = or disjoint i64 %36, 1
   %44 = getelementptr inbounds <8 x i64>, ptr %3, i64 %43
   %45 = load <32 x i16>, ptr %44, align 64
   %46 = tail call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %7, <32 x i16> %45)
   %47 = add <16 x i32> %46, %42
-  %48 = or i64 %36, 2
+  %48 = or disjoint i64 %36, 2
   %49 = getelementptr inbounds <8 x i64>, ptr %3, i64 %48
   %50 = load <32 x i16>, ptr %49, align 64
   %51 = tail call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %7, <32 x i16> %50)
   %52 = add <16 x i32> %51, %47
-  %53 = or i64 %36, 3
+  %53 = or disjoint i64 %36, 3
   %54 = getelementptr inbounds <8 x i64>, ptr %3, i64 %53
   %55 = load <32 x i16>, ptr %54, align 64
   %56 = tail call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %7, <32 x i16> %55)
@@ -243,7 +243,7 @@ define void @bar_512(i32 %0, ptr %1, <8 x i64> %2, ptr %3) {
   %29 = load <16 x i32>, ptr %28, align 64
   %30 = tail call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %29, <16 x i32> %7, <16 x i32> %27)
   store <16 x i32> %30, ptr %28, align 64
-  %31 = or i64 %24, 1
+  %31 = or disjoint i64 %24, 1
   %32 = getelementptr inbounds <8 x i64>, ptr %3, i64 %31
   %33 = load <16 x i32>, ptr %32, align 64
   %34 = getelementptr inbounds <8 x i64>, ptr %1, i64 %31

diff --git a/llvm/test/CodeGen/X86/avxvnni-combine.ll b/llvm/test/CodeGen/X86/avxvnni-combine.ll
index 75e29df9f34ac..158a1fdf44fc6 100644
--- a/llvm/test/CodeGen/X86/avxvnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avxvnni-combine.ll
@@ -187,17 +187,17 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
   %40 = load <8 x i16>, ptr %39, align 16
   %41 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %7, <8 x i16> %40)
   %42 = add <4 x i32> %41, %37
-  %43 = or i64 %36, 1
+  %43 = or disjoint i64 %36, 1
   %44 = getelementptr inbounds <2 x i64>, ptr %3, i64 %43
   %45 = load <8 x i16>, ptr %44, align 16
   %46 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %7, <8 x i16> %45)
   %47 = add <4 x i32> %46, %42
-  %48 = or i64 %36, 2
+  %48 = or disjoint i64 %36, 2
   %49 = getelementptr inbounds <2 x i64>, ptr %3, i64 %48
   %50 = load <8 x i16>, ptr %49, align 16
   %51 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %7, <8 x i16> %50)
   %52 = add <4 x i32> %51, %47
-  %53 = or i64 %36, 3
+  %53 = or disjoint i64 %36, 3
   %54 = getelementptr inbounds <2 x i64>, ptr %3, i64 %53
   %55 = load <8 x i16>, ptr %54, align 16
   %56 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %7, <8 x i16> %55)
@@ -328,7 +328,7 @@ define void @bar_128(i32 %0, ptr %1, <2 x i64> %2, ptr %3) {
   %29 = load <4 x i32>, ptr %28, align 16
   %30 = tail call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %29, <4 x i32> %7, <4 x i32> %27)
   store <4 x i32> %30, ptr %28, align 16
-  %31 = or i64 %24, 1
+  %31 = or disjoint i64 %24, 1
   %32 = getelementptr inbounds <2 x i64>, ptr %3, i64 %31
   %33 = load <4 x i32>, ptr %32, align 16
   %34 = getelementptr inbounds <2 x i64>, ptr %1, i64 %31
@@ -534,17 +534,17 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
   %40 = load <16 x i16>, ptr %39, align 32
   %41 = tail call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %7, <16 x i16> %40)
   %42 = add <8 x i32> %41, %37
-  %43 = or i64 %36, 1
+  %43 = or disjoint i64 %36, 1
   %44 = getelementptr inbounds <4 x i64>, ptr %3, i64 %43
   %45 = load <16 x i16>, ptr %44, align 32
   %46 = tail call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %7, <16 x i16> %45)
   %47 = add <8 x i32> %46, %42
-  %48 = or i64 %36, 2
+  %48 = or disjoint i64 %36, 2
   %49 = getelementptr inbounds <4 x i64>, ptr %3, i64 %48
   %50 = load <16 x i16>, ptr %49, align 32
   %51 = tail call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %7, <16 x i16> %50)
   %52 = add <8 x i32> %51, %47
-  %53 = or i64 %36, 3
+  %53 = or disjoint i64 %36, 3
   %54 = getelementptr inbounds <4 x i64>, ptr %3, i64 %53
   %55 = load <16 x i16>, ptr %54, align 32
   %56 = tail call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %7, <16 x i16> %55)
@@ -684,7 +684,7 @@ define void @bar_256(i32 %0, ptr %1, <4 x i64> %2, ptr %3) {
   %29 = load <8 x i32>, ptr %28, align 32
   %30 = tail call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %29, <8 x i32> %7, <8 x i32> %27)
   store <8 x i32> %30, ptr %28, align 32
-  %31 = or i64 %24, 1
+  %31 = or disjoint i64 %24, 1
   %32 = getelementptr inbounds <4 x i64>, ptr %3, i64 %31
   %33 = load <8 x i32>, ptr %32, align 32
   %34 = getelementptr inbounds <4 x i64>, ptr %1, i64 %31

diff --git a/llvm/test/CodeGen/X86/loop-strength-reduce4.ll b/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
index 3e4d620b6dc87..4bb1150bf702a 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
@@ -35,21 +35,21 @@ bb:		; preds = %bb, %entry
 	%tmp7 = load i32, ptr %tmp6, align 4		; <i32> [#uses=1]
 	%tmp8 = xor i32 %tmp7, %tmp4		; <i32> [#uses=2]
 	store i32 %tmp8, ptr %tmp3, align 4
-	%tmp1378 = or i32 %j.065.0, 1		; <i32> [#uses=1]
+	%tmp1378 = or disjoint i32 %j.065.0, 1		; <i32> [#uses=1]
 	%tmp16 = getelementptr [0 x i32], ptr @state, i32 0, i32 %tmp1378		; <ptr> [#uses=2]
 	%tmp17 = load i32, ptr %tmp16, align 4		; <i32> [#uses=1]
 	%tmp19 = getelementptr [0 x i32], ptr @S, i32 0, i32 %tmp8		; <ptr> [#uses=1]
 	%tmp20 = load i32, ptr %tmp19, align 4		; <i32> [#uses=1]
 	%tmp21 = xor i32 %tmp20, %tmp17		; <i32> [#uses=2]
 	store i32 %tmp21, ptr %tmp16, align 4
-	%tmp2680 = or i32 %j.065.0, 2		; <i32> [#uses=1]
+	%tmp2680 = or disjoint i32 %j.065.0, 2		; <i32> [#uses=1]
 	%tmp29 = getelementptr [0 x i32], ptr @state, i32 0, i32 %tmp2680		; <ptr> [#uses=2]
 	%tmp30 = load i32, ptr %tmp29, align 4		; <i32> [#uses=1]
 	%tmp32 = getelementptr [0 x i32], ptr @S, i32 0, i32 %tmp21		; <ptr> [#uses=1]
 	%tmp33 = load i32, ptr %tmp32, align 4		; <i32> [#uses=1]
 	%tmp34 = xor i32 %tmp33, %tmp30		; <i32> [#uses=2]
 	store i32 %tmp34, ptr %tmp29, align 4
-	%tmp3982 = or i32 %j.065.0, 3		; <i32> [#uses=1]
+	%tmp3982 = or disjoint i32 %j.065.0, 3		; <i32> [#uses=1]
 	%tmp42 = getelementptr [0 x i32], ptr @state, i32 0, i32 %tmp3982		; <ptr> [#uses=2]
 	%tmp43 = load i32, ptr %tmp42, align 4		; <i32> [#uses=1]
 	%tmp45 = getelementptr [0 x i32], ptr @S, i32 0, i32 %tmp34		; <ptr> [#uses=1]

diff --git a/llvm/test/CodeGen/X86/lsr-addrecloops.ll b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
index b0f97e3fc2a33..74a8d68a850f8 100644
--- a/llvm/test/CodeGen/X86/lsr-addrecloops.ll
+++ b/llvm/test/CodeGen/X86/lsr-addrecloops.ll
@@ -68,25 +68,25 @@ vector.body807:                                   ; preds = %vector.body807, %ve
   %niter = phi i64 [ 0, %vector.body807.preheader.new ], [ %niter.next.7, %vector.body807 ]
   %uglygep1197 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv1194
   store <8 x float> zeroinitializer, ptr %uglygep1197, align 4
-  %lsr.iv.next1195 = or i64 %lsr.iv1194, 1
+  %lsr.iv.next1195 = or disjoint i64 %lsr.iv1194, 1
   %uglygep1197.1 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195
   store <8 x float> zeroinitializer, ptr %uglygep1197.1, align 4
-  %lsr.iv.next1195.1 = or i64 %lsr.iv1194, 2
+  %lsr.iv.next1195.1 = or disjoint i64 %lsr.iv1194, 2
   %uglygep1197.2 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.1
   store <8 x float> zeroinitializer, ptr %uglygep1197.2, align 4
-  %lsr.iv.next1195.2 = or i64 %lsr.iv1194, 3
+  %lsr.iv.next1195.2 = or disjoint i64 %lsr.iv1194, 3
   %uglygep1197.3 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.2
   store <8 x float> zeroinitializer, ptr %uglygep1197.3, align 4
-  %lsr.iv.next1195.3 = or i64 %lsr.iv1194, 4
+  %lsr.iv.next1195.3 = or disjoint i64 %lsr.iv1194, 4
   %uglygep1197.4 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.3
   store <8 x float> zeroinitializer, ptr %uglygep1197.4, align 4
-  %lsr.iv.next1195.4 = or i64 %lsr.iv1194, 5
+  %lsr.iv.next1195.4 = or disjoint i64 %lsr.iv1194, 5
   %uglygep1197.5 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.4
   store <8 x float> zeroinitializer, ptr %uglygep1197.5, align 4
-  %lsr.iv.next1195.5 = or i64 %lsr.iv1194, 6
+  %lsr.iv.next1195.5 = or disjoint i64 %lsr.iv1194, 6
   %uglygep1197.6 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.5
   store <8 x float> zeroinitializer, ptr %uglygep1197.6, align 4
-  %lsr.iv.next1195.6 = or i64 %lsr.iv1194, 7
+  %lsr.iv.next1195.6 = or disjoint i64 %lsr.iv1194, 7
   %uglygep1197.7 = getelementptr i8, ptr %lsr.iv1135, i64 %lsr.iv.next1195.6
   store <8 x float> zeroinitializer, ptr %uglygep1197.7, align 4
   %lsr.iv.next1195.7 = add i64 %lsr.iv1194, 8
@@ -136,25 +136,25 @@ vector.body847:                                   ; preds = %vector.body847, %ve
   %niter16 = phi i64 [ 0, %vector.body847.preheader.new ], [ %niter16.next.7, %vector.body847 ]
   %uglygep1156 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv1152
   store <8 x float> zeroinitializer, ptr %uglygep1156, align 4
-  %lsr.iv.next1153 = or i64 %lsr.iv1152, 1
+  %lsr.iv.next1153 = or disjoint i64 %lsr.iv1152, 1
   %uglygep1156.1 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153
   store <8 x float> zeroinitializer, ptr %uglygep1156.1, align 4
-  %lsr.iv.next1153.1 = or i64 %lsr.iv1152, 2
+  %lsr.iv.next1153.1 = or disjoint i64 %lsr.iv1152, 2
   %uglygep1156.2 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.1
   store <8 x float> zeroinitializer, ptr %uglygep1156.2, align 4
-  %lsr.iv.next1153.2 = or i64 %lsr.iv1152, 3
+  %lsr.iv.next1153.2 = or disjoint i64 %lsr.iv1152, 3
   %uglygep1156.3 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.2
   store <8 x float> zeroinitializer, ptr %uglygep1156.3, align 4
-  %lsr.iv.next1153.3 = or i64 %lsr.iv1152, 4
+  %lsr.iv.next1153.3 = or disjoint i64 %lsr.iv1152, 4
   %uglygep1156.4 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.3
   store <8 x float> zeroinitializer, ptr %uglygep1156.4, align 4
-  %lsr.iv.next1153.4 = or i64 %lsr.iv1152, 5
+  %lsr.iv.next1153.4 = or disjoint i64 %lsr.iv1152, 5
   %uglygep1156.5 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.4
   store <8 x float> zeroinitializer, ptr %uglygep1156.5, align 4
-  %lsr.iv.next1153.5 = or i64 %lsr.iv1152, 6
+  %lsr.iv.next1153.5 = or disjoint i64 %lsr.iv1152, 6
   %uglygep1156.6 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.5
   store <8 x float> zeroinitializer, ptr %uglygep1156.6, align 4
-  %lsr.iv.next1153.6 = or i64 %lsr.iv1152, 7
+  %lsr.iv.next1153.6 = or disjoint i64 %lsr.iv1152, 7
   %uglygep1156.7 = getelementptr i8, ptr %uglygep11551, i64 %lsr.iv.next1153.6
   store <8 x float> zeroinitializer, ptr %uglygep1156.7, align 4
   %lsr.iv.next1153.7 = add i64 %lsr.iv1152, 8

diff --git a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
index a2e4e4784d361..85449b01265bf 100644
--- a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -195,7 +195,7 @@ bb:		; preds = %bb1, %entry
 	%9 = zext i32 %8 to i64		; <i64> [#uses=1]
 	%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9		; <i32*> [#uses=1]
 	%11 = load i32, i32* %10, align 4		; <i32> [#uses=1]
-	%ctg2.sum2728 = or i64 %tmp18, 8		; <i64> [#uses=1]
+	%ctg2.sum2728 = or disjoint i64 %tmp18, 8		; <i64> [#uses=1]
 	%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728		; <i8*> [#uses=1]
 	%13 = bitcast i8* %12 to i32*		; <i32*> [#uses=1]
 	%14 = load i32, i32* %13, align 4		; <i32> [#uses=1]
@@ -209,7 +209,7 @@ bb:		; preds = %bb1, %entry
 	%22 = zext i32 %21 to i64		; <i64> [#uses=1]
 	%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22		; <i32*> [#uses=1]
 	%24 = load i32, i32* %23, align 4		; <i32> [#uses=1]
-	%ctg2.sum2930 = or i64 %tmp18, 12		; <i64> [#uses=1]
+	%ctg2.sum2930 = or disjoint i64 %tmp18, 12		; <i64> [#uses=1]
 	%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930		; <i8*> [#uses=1]
 	%26 = bitcast i8* %25 to i32*		; <i32*> [#uses=1]
 	%27 = load i32, i32* %26, align 4		; <i32> [#uses=1]

diff --git a/llvm/test/CodeGen/X86/merge_store.ll b/llvm/test/CodeGen/X86/merge_store.ll
index 0e57b43d2f844..1cf3b9a83bac1 100644
--- a/llvm/test/CodeGen/X86/merge_store.ll
+++ b/llvm/test/CodeGen/X86/merge_store.ll
@@ -23,13 +23,13 @@ entry:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
   store i32 1, ptr %arrayidx, align 4
-  %0 = or i64 %indvars.iv, 1
+  %0 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %a, i64 %0
   store i32 1, ptr %arrayidx2, align 4
-  %1 = or i64 %indvars.iv, 2
+  %1 = or disjoint i64 %indvars.iv, 2
   %arrayidx5 = getelementptr inbounds i32, ptr %a, i64 %1
   store i32 1, ptr %arrayidx5, align 4
-  %2 = or i64 %indvars.iv, 3
+  %2 = or disjoint i64 %indvars.iv, 3
   %arrayidx8 = getelementptr inbounds i32, ptr %a, i64 %2
   store i32 1, ptr %arrayidx8, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4

diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 4b398095b549d..1bd427c4a4b0c 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -324,7 +324,7 @@ bb14:                                             ; preds = %bb15, %bb.nph3
   %29 = getelementptr i8, ptr %j, i32 %.sum
   store i8 %28, ptr %29, align 1
   %30 = shl i32 %x.12, 2
-  %31 = or i32 %30, 2
+  %31 = or disjoint i32 %30, 2
   %32 = add i32 %31, %21
   %33 = getelementptr i8, ptr %r, i32 %32
   %34 = load i8, ptr %33, align 1
@@ -761,7 +761,7 @@ bb14:                                             ; preds = %bb15, %bb.nph3
   %29 = getelementptr i8, ptr %j, i32 %.sum
   store i8 %28, ptr %29, align 1
   %30 = shl i32 %x.12, 2
-  %31 = or i32 %30, 2
+  %31 = or disjoint i32 %30, 2
   %32 = add i32 %31, %21
   %33 = getelementptr i8, ptr %r, i32 %32
   %34 = load i8, ptr %33, align 1

diff --git a/llvm/test/CodeGen/X86/unused_stackslots.ll b/llvm/test/CodeGen/X86/unused_stackslots.ll
index 5aeaca7e84c3b..d909dd478cae8 100644
--- a/llvm/test/CodeGen/X86/unused_stackslots.ll
+++ b/llvm/test/CodeGen/X86/unused_stackslots.ll
@@ -42,9 +42,9 @@ for.body:                                         ; preds = %for.inc73, %entry
   %shl1 = shl nsw i32 %rem, 3
   %tmp9 = sext i32 %shl1 to i64
   %tmp10 = sext i32 %shl to i64
-  %tmp11 = or i32 %shl1, 4
+  %tmp11 = or disjoint i32 %shl1, 4
   %tmp12 = sext i32 %tmp11 to i64
-  %tmp13 = or i32 %shl, 4
+  %tmp13 = or disjoint i32 %shl, 4
   %tmp14 = sext i32 %tmp13 to i64
   br label %for.body4
 
@@ -59,14 +59,14 @@ for.body4:                                        ; preds = %for.inc48, %for.bod
   %tmp18 = sub nuw nsw i64 %indvars.iv148, %tmp10
   %tmp19 = sext i32 %add5 to i64
   %tmp20 = add nsw i64 %tmp19, 1
-  %tmp21 = or i64 %indvars.iv148, 1
-  %tmp22 = or i64 %tmp18, 1
+  %tmp21 = or disjoint i64 %indvars.iv148, 1
+  %tmp22 = or disjoint i64 %tmp18, 1
   %tmp23 = add nsw i64 %tmp19, 2
-  %tmp24 = or i64 %indvars.iv148, 2
-  %tmp25 = or i64 %tmp18, 2
+  %tmp24 = or disjoint i64 %indvars.iv148, 2
+  %tmp25 = or disjoint i64 %tmp18, 2
   %tmp26 = add nsw i64 %tmp19, 3
-  %tmp27 = or i64 %indvars.iv148, 3
-  %tmp28 = or i64 %tmp18, 3
+  %tmp27 = or disjoint i64 %indvars.iv148, 3
+  %tmp28 = or disjoint i64 %tmp18, 3
   br label %for.body9
 
 for.body9:                                        ; preds = %for.inc45.for.body9_crit_edge, %for.body4

diff --git a/llvm/test/Transforms/IRCE/stride_more_than_1.ll b/llvm/test/Transforms/IRCE/stride_more_than_1.ll
index 92cd410b12f66..749c17d118205 100644
--- a/llvm/test/Transforms/IRCE/stride_more_than_1.ll
+++ b/llvm/test/Transforms/IRCE/stride_more_than_1.ll
@@ -917,7 +917,7 @@ define i32 @binop_or_is_iv_base(ptr %p, i32 %end) {
 ; CHECK-NEXT:    br i1 true, label [[GUARDED]], label [[DEOPT_LOOPEXIT2:%.*]]
 ; CHECK:       guarded:
 ; CHECK-NEXT:    [[IV_ADD]] = add i32 [[IV]], 8
-; CHECK-NEXT:    [[IV_OR:%.*]] = or i32 [[IV_ADD]], 7
+; CHECK-NEXT:    [[IV_OR:%.*]] = or disjoint i32 [[IV_ADD]], 7
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[IV_OR]], [[END]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[IV_OR]], [[EXIT_MAINLOOP_AT]]
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[LOOP_HEADER]], label [[MAIN_EXIT_SELECTOR:%.*]]
@@ -951,7 +951,7 @@ define i32 @binop_or_is_iv_base(ptr %p, i32 %end) {
 ; CHECK-NEXT:    br i1 [[CHECK_POSTLOOP]], label [[GUARDED_POSTLOOP]], label [[DEOPT_LOOPEXIT:%.*]]
 ; CHECK:       guarded.postloop:
 ; CHECK-NEXT:    [[IV_ADD_POSTLOOP]] = add i32 [[IV_POSTLOOP]], 8
-; CHECK-NEXT:    [[IV_OR_POSTLOOP:%.*]] = or i32 [[IV_ADD_POSTLOOP]], 7
+; CHECK-NEXT:    [[IV_OR_POSTLOOP:%.*]] = or disjoint i32 [[IV_ADD_POSTLOOP]], 7
 ; CHECK-NEXT:    [[CMP_POSTLOOP:%.*]] = icmp slt i32 [[IV_OR_POSTLOOP]], [[END]]
 ; CHECK-NEXT:    br i1 [[CMP_POSTLOOP]], label [[LOOP_HEADER_POSTLOOP]], label [[COMMON_RET_LOOPEXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP21:![0-9]+]], !loop_constrainer.loop.clone !6
 ;
@@ -967,7 +967,7 @@ loop.header:
 
 guarded:
   %iv.add = add i32 %iv, 8
-  %iv.or = or i32 %iv.add, 7
+  %iv.or = or disjoint i32 %iv.add, 7
   %cmp = icmp slt i32 %iv.or, %end
   br i1 %cmp, label %loop.header, label %common.ret
 

diff --git a/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll b/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
index 4dedf7c6b6331..54421f1a5baed 100644
--- a/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
@@ -53,21 +53,21 @@ bb3:		; preds = %bb4, %bb.nph
 	%t9 = zext i32 %t8 to i64		; <i64> [#uses=1]
 	%t10 = getelementptr float, ptr %pow4_3_tab_ptr, i64 %t9		; <ptr> [#uses=1]
 	%t11 = load float, ptr %t10, align 4		; <float> [#uses=1]
-	%t12 = or i32 %i.05, 1		; <i32> [#uses=1]
+	%t12 = or disjoint i32 %i.05, 1		; <i32> [#uses=1]
 	%t13 = sext i32 %t12 to i64		; <i64> [#uses=1]
 	%t14 = getelementptr i32, ptr %quaSpectrum, i64 %t13		; <ptr> [#uses=1]
 	%t15 = load i32, ptr %t14, align 4		; <i32> [#uses=1]
 	%t16 = zext i32 %t15 to i64		; <i64> [#uses=1]
 	%t17 = getelementptr float, ptr %pow4_3_tab_ptr, i64 %t16		; <ptr> [#uses=1]
 	%t18 = load float, ptr %t17, align 4		; <float> [#uses=1]
-	%t19 = or i32 %i.05, 2		; <i32> [#uses=1]
+	%t19 = or disjoint i32 %i.05, 2		; <i32> [#uses=1]
 	%t20 = sext i32 %t19 to i64		; <i64> [#uses=1]
 	%t21 = getelementptr i32, ptr %quaSpectrum, i64 %t20		; <ptr> [#uses=1]
 	%t22 = load i32, ptr %t21, align 4		; <i32> [#uses=1]
 	%t23 = zext i32 %t22 to i64		; <i64> [#uses=1]
 	%t24 = getelementptr float, ptr %pow4_3_tab_ptr, i64 %t23		; <ptr> [#uses=1]
 	%t25 = load float, ptr %t24, align 4		; <float> [#uses=1]
-	%t26 = or i32 %i.05, 3		; <i32> [#uses=1]
+	%t26 = or disjoint i32 %i.05, 3		; <i32> [#uses=1]
 	%t27 = sext i32 %t26 to i64		; <i64> [#uses=1]
 	%t28 = getelementptr i32, ptr %quaSpectrum, i64 %t27		; <ptr> [#uses=1]
 	%t29 = load i32, ptr %t28, align 4		; <i32> [#uses=1]
@@ -78,17 +78,17 @@ bb3:		; preds = %bb4, %bb.nph
 	%t34 = sext i32 %i.05 to i64		; <i64> [#uses=1]
 	%t35 = getelementptr float, ptr %iquaSpectrum, i64 %t34		; <ptr> [#uses=1]
 	store float %t33, ptr %t35, align 4
-	%t36 = or i32 %i.05, 1		; <i32> [#uses=1]
+	%t36 = or disjoint i32 %i.05, 1		; <i32> [#uses=1]
 	%t37 = fmul float %t18, %invQuantizer.0		; <float> [#uses=1]
 	%t38 = sext i32 %t36 to i64		; <i64> [#uses=1]
 	%t39 = getelementptr float, ptr %iquaSpectrum, i64 %t38		; <ptr> [#uses=1]
 	store float %t37, ptr %t39, align 4
-	%t40 = or i32 %i.05, 2		; <i32> [#uses=1]
+	%t40 = or disjoint i32 %i.05, 2		; <i32> [#uses=1]
 	%t41 = fmul float %t25, %invQuantizer.0		; <float> [#uses=1]
 	%t42 = sext i32 %t40 to i64		; <i64> [#uses=1]
 	%t43 = getelementptr float, ptr %iquaSpectrum, i64 %t42		; <ptr> [#uses=1]
 	store float %t41, ptr %t43, align 4
-	%t44 = or i32 %i.05, 3		; <i32> [#uses=1]
+	%t44 = or disjoint i32 %i.05, 3		; <i32> [#uses=1]
 	%t45 = fmul float %t32, %invQuantizer.0		; <float> [#uses=1]
 	%t46 = sext i32 %t44 to i64		; <i64> [#uses=1]
 	%t47 = getelementptr float, ptr %iquaSpectrum, i64 %t46		; <ptr> [#uses=1]

diff --git a/llvm/test/Transforms/IndVarSimplify/lcssa-preservation.ll b/llvm/test/Transforms/IndVarSimplify/lcssa-preservation.ll
index a372fa900ddbf..f00a111aa6a6d 100644
--- a/llvm/test/Transforms/IndVarSimplify/lcssa-preservation.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lcssa-preservation.ll
@@ -109,9 +109,9 @@ define void @pr57000(i64 %a) {
 ; CHECK-NEXT:    br label [[LOOP_2_HEADER:%.*]]
 ; CHECK:       loop.2.header:
 ; CHECK-NEXT:    switch i8 [[CMP_EXT_LCSSA]], label [[LOOP_1_LOOPEXIT:%.*]] [
-; CHECK-NEXT:    i8 -1, label [[LOOP_2_LATCH:%.*]]
-; CHECK-NEXT:    i8 1, label [[LOOP_2_LATCH]]
-; CHECK-NEXT:    i8 4, label [[LOOP_2_HEADER]]
+; CHECK-NEXT:      i8 -1, label [[LOOP_2_LATCH:%.*]]
+; CHECK-NEXT:      i8 1, label [[LOOP_2_LATCH]]
+; CHECK-NEXT:      i8 4, label [[LOOP_2_HEADER]]
 ; CHECK-NEXT:    ]
 ; CHECK:       loop.2.latch:
 ; CHECK-NEXT:    [[CMP_TRUNC_LCSSA1:%.*]] = phi i1 [ [[CMP_LCSSA2]], [[LOOP_2_HEADER]] ], [ [[CMP_LCSSA2]], [[LOOP_2_HEADER]] ]
@@ -147,7 +147,7 @@ define void @D149435(i16 %arg) {
 ; CHECK:       loop1:
 ; CHECK-NEXT:    [[FR:%.*]] = freeze i16 [[ARG:%.*]]
 ; CHECK-NEXT:    [[ARRAYIDX_IDX:%.*]] = shl i16 [[FR]], 1
-; CHECK-NEXT:    [[OR:%.*]] = or i16 [[ARRAYIDX_IDX]], 1
+; CHECK-NEXT:    [[OR:%.*]] = or disjoint i16 [[ARRAYIDX_IDX]], 1
 ; CHECK-NEXT:    br i1 false, label [[LOOP1]], label [[LOOP2_PREHEADER:%.*]]
 ; CHECK:       loop2.preheader:
 ; CHECK-NEXT:    [[FR_LCSSA:%.*]] = phi i16 [ [[FR]], [[LOOP1]] ]
@@ -175,7 +175,7 @@ define void @D149435(i16 %arg) {
 loop1:
   %fr = freeze i16 %arg
   %arrayidx.idx = shl i16 %fr, 1
-  %or = or i16 %arrayidx.idx, 1
+  %or = or disjoint i16 %arrayidx.idx, 1
   br i1 false, label %loop1, label %loop2.preheader
 
 loop2.preheader:

diff --git a/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll b/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
index 355c927b0accf..c35c5bacf68ca 100644
--- a/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
+++ b/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
@@ -299,7 +299,7 @@ define i64 @cloneOr(i32 %limit, ptr %base) nounwind {
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[ADR:%.*]] = getelementptr i64, ptr [[BASE:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[VAL:%.*]] = load i64, ptr [[ADR]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], [[TMP0]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
@@ -319,7 +319,7 @@ loop:
   %t1 = sext i32 %iv to i64
   %adr = getelementptr i64, ptr %base, i64 %t1
   %val = load i64, ptr %adr
-  %t2 = or i32 %iv, 1
+  %t2 = or disjoint i32 %iv, 1
   %t3 = sext i32 %t2 to i64
   %iv.next = add i32 %iv, 2
   %cmp = icmp slt i32 %iv.next, %halfLim

diff --git a/llvm/test/Transforms/IndVarSimplify/pr58702-invalidate-scev-when-replacing-congruent-phis.ll b/llvm/test/Transforms/IndVarSimplify/pr58702-invalidate-scev-when-replacing-congruent-phis.ll
index d3013655ae5dc..e7025d75dd998 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr58702-invalidate-scev-when-replacing-congruent-phis.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr58702-invalidate-scev-when-replacing-congruent-phis.ll
@@ -5,9 +5,9 @@ define i32 @test(i32 %p_16, i1 %c) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[P_16:%.*]], 6
-; CHECK-NEXT:    [[OR_1:%.*]] = or i32 [[XOR]], [[P_16]]
+; CHECK-NEXT:    [[OR_1:%.*]] = or disjoint i32 [[XOR]], [[P_16]]
 ; CHECK-NEXT:    [[XOR_1:%.*]] = xor i32 [[OR_1]], 6
-; CHECK-NEXT:    [[OR_2:%.*]] = or i32 [[XOR_1]], [[P_16]]
+; CHECK-NEXT:    [[OR_2:%.*]] = or disjoint i32 [[XOR_1]], [[P_16]]
 ; CHECK-NEXT:    [[XOR_2:%.*]] = xor i32 [[OR_2]], 6
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer.header:
@@ -22,7 +22,7 @@ define i32 @test(i32 %p_16, i1 %c) {
 ; CHECK-NEXT:    br i1 [[C]], label [[OUTER_LATCH]], label [[INNER_LATCH_3:%.*]]
 ; CHECK:       inner.latch.3:
 ; CHECK-NEXT:    [[XOR_2_LCSSA:%.*]] = phi i32 [ [[XOR_2]], [[INNER_LATCH_2]] ]
-; CHECK-NEXT:    [[OR_3:%.*]] = or i32 [[XOR_2_LCSSA]], [[P_16]]
+; CHECK-NEXT:    [[OR_3:%.*]] = or disjoint i32 [[XOR_2_LCSSA]], [[P_16]]
 ; CHECK-NEXT:    [[XOR_3:%.*]] = xor i32 [[OR_3]], 6
 ; CHECK-NEXT:    ret i32 [[XOR_3]]
 ; CHECK:       outer.latch:
@@ -41,7 +41,7 @@ inner.header:
   br i1 %c, label %outer.latch, label %inner.latch
 
 inner.latch:
-  %or = or i32 %p.2, %p_16
+  %or = or disjoint i32 %p.2, %p_16
   %xor = xor i32 %or, 6
   %add = add nuw nsw i32 %p.3, 1
   %cmp = icmp ult i32 %p.3, 3

diff --git a/llvm/test/Transforms/IndVarSimplify/pr64891.ll b/llvm/test/Transforms/IndVarSimplify/pr64891.ll
index 69b22bc373a60..465b4605ea9c9 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr64891.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr64891.ll
@@ -20,7 +20,7 @@ entry:
 
 loop:
   %phi = phi i16 [ 0, %entry ], [ %or, %loop ]
-  %or = or i16 %phi, %trunc
+  %or = or disjoint i16 %phi, %trunc
   %phi.ext = sext i16 %phi to i64
   %add.ptr = getelementptr i8, ptr null, i64 %phi.ext
   br label %loop
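
For reference, a minimal standalone sketch (illustrative names, not
taken from any of the modified tests) of what the flag asserts: the
operands of an "or disjoint" have no set bits in common, so the
instruction computes the same value as an "add" on the same operands.

  loop:
    ; %iv only takes even values, so its low bit is known zero.
    %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
    ; No bit is set in both %iv and 1, hence "disjoint" holds and
    ; %iv.odd is equivalent to add i64 %iv, 1.
    %iv.odd = or disjoint i64 %iv, 1
    %iv.next = add nuw nsw i64 %iv, 2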

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
index 067bbdf43922c..d78537ca83ed8 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll
@@ -38,7 +38,7 @@ entry:
   %cstoreval1 = fptrunc double %storeval1 to float
   store float %cstoreval1, ptr addrspace(1) %arrayidx, align 4
 
-  %add23 = or i64 %add10, 1
+  %add23 = or disjoint i64 %add10, 1
   %arrayidx24 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %add23
   %load2 = load float, ptr addrspace(1) %arrayidx24, align 4
   %conv25 = fpext float %load2 to double

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
index 6df55ffba8633..9d686e9837f3b 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
@@ -63,7 +63,7 @@ entry:
   %a.0 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %zext.id.x
   %c.0 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %zext.id.x
 
-  %id.x.1 = or i32 %shl, 1
+  %id.x.1 = or disjoint i32 %shl, 1
   %id.x.1.ext = zext i32 %id.x.1 to i64
 
   %a.1 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %id.x.1.ext
@@ -90,7 +90,7 @@ entry:
   %a.0 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %zext.id.x
   %c.0 = getelementptr inbounds float, ptr addrspace(1) %c, i64 %zext.id.x
 
-  %id.x.1 = or i32 %shl, 1
+  %id.x.1 = or disjoint i32 %shl, 1
   %id.x.1.ext = sext i32 %id.x.1 to i64
 
   %a.1 = getelementptr inbounds float, ptr addrspace(1) %a, i64 %id.x.1.ext
@@ -127,7 +127,7 @@ loop:
   %c.0 = getelementptr inbounds i32, ptr addrspace(1) %c, i64 %idx.ext
   %a.0 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idx.ext
 
-  %idx.1 = or i32 %idx, 1
+  %idx.1 = or disjoint i32 %idx, 1
   %idx.1.ext = zext i32 %idx.1 to i64
   %c.1 = getelementptr inbounds i32, ptr addrspace(1) %c, i64 %idx.1.ext
   %a.1 = getelementptr inbounds i32, ptr addrspace(1) %a, i64 %idx.1.ext

diff --git a/llvm/test/Transforms/LoopIdiom/unroll-custom-dl.ll b/llvm/test/Transforms/LoopIdiom/unroll-custom-dl.ll
index f6f3267aba48e..ac50c8716c73c 100644
--- a/llvm/test/Transforms/LoopIdiom/unroll-custom-dl.ll
+++ b/llvm/test/Transforms/LoopIdiom/unroll-custom-dl.ll
@@ -28,7 +28,7 @@ define void @test(ptr %f, i32 %n) nounwind ssp {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or disjoint i32 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[TMP5]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[INDVARS_IV_NEXT]], [[TMP0]]
@@ -50,7 +50,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %f, i32 %indvars.iv
   store i32 0, ptr %arrayidx, align 4
-  %1 = or i32 %indvars.iv, 1
+  %1 = or disjoint i32 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %f, i32 %1
   store i32 0, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 2
@@ -86,7 +86,7 @@ define void @test_pattern(ptr %f, i32 %n) nounwind ssp {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[INDVARS_IV]]
-; CHECK-NEXT:    [[X1:%.*]] = or i32 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[X1:%.*]] = or disjoint i32 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i32 [[X1]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i32 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[INDVARS_IV_NEXT]], [[MUL]]
@@ -108,7 +108,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %indvars.iv = phi i32 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %f, i32 %indvars.iv
   store i32 2, ptr %arrayidx, align 4
-  %x1 = or i32 %indvars.iv, 1
+  %x1 = or disjoint i32 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %f, i32 %x1
   store i32 2, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 2

diff --git a/llvm/test/Transforms/LoopIdiom/unroll.ll b/llvm/test/Transforms/LoopIdiom/unroll.ll
index 8dce77da033f8..7c41310abdfd8 100644
--- a/llvm/test/Transforms/LoopIdiom/unroll.ll
+++ b/llvm/test/Transforms/LoopIdiom/unroll.ll
@@ -29,7 +29,7 @@ define void @test(ptr %f, i32 %n) nounwind ssp {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP5]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[TMP0]]
@@ -52,7 +52,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv
   store i32 0, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %f, i64 %1
   store i32 0, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
@@ -89,7 +89,7 @@ define void @test_pattern(ptr %f, i32 %n) nounwind ssp {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP5:%.*]] = or i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or disjoint i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP5]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], [[TMP0]]
@@ -112,7 +112,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv
   store i32 2, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %f, i64 %1
   store i32 2, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2

diff --git a/llvm/test/Transforms/LoopInterchange/pr57148.ll b/llvm/test/Transforms/LoopInterchange/pr57148.ll
index 4bdc41db559ea..f2b8a93a780bd 100644
--- a/llvm/test/Transforms/LoopInterchange/pr57148.ll
+++ b/llvm/test/Transforms/LoopInterchange/pr57148.ll
@@ -116,7 +116,7 @@ define void @test2() {
 ; CHECK-NEXT:    [[INDEX86:%.*]] = phi i16 [ 0, [[FOR_COND37_PREHEADER_SPLIT]] ], [ [[TMP3:%.*]], [[VECTOR_BODY85_SPLIT:%.*]] ]
 ; CHECK-NEXT:    br label [[FOR_COND33_PREHEADER_PREHEADER]]
 ; CHECK:       vector.body85.split1:
-; CHECK-NEXT:    [[TMP0:%.*]] = or i16 [[INDEX86]], 2
+; CHECK-NEXT:    [[TMP0:%.*]] = or disjoint i16 [[INDEX86]], 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [512 x [4 x i32]], ptr @b, i16 0, i16 [[TMP0]], i16 [[J_165]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT87:%.*]] = add nuw i16 [[INDEX86]], 4
@@ -148,7 +148,7 @@ for.cond37.preheader:                             ; preds = %middle.block80, %fo
 
 vector.body85:                                    ; preds = %vector.body85, %for.cond37.preheader
   %index86 = phi i16 [ 0, %for.cond37.preheader ], [ %index.next87, %vector.body85 ]
-  %0 = or i16 %index86, 2
+  %0 = or disjoint i16 %index86, 2
   %1 = getelementptr inbounds [512 x [4 x i32]], ptr @b, i16 0, i16 %0, i16 %j.165
   %2 = load i32, ptr %1, align 1
   %index.next87 = add nuw i16 %index86, 4

diff --git a/llvm/test/Transforms/LoopReroll/basic32iters.ll b/llvm/test/Transforms/LoopReroll/basic32iters.ll
index 6c313c1febab4..edf38cb3eb18d 100644
--- a/llvm/test/Transforms/LoopReroll/basic32iters.ll
+++ b/llvm/test/Transforms/LoopReroll/basic32iters.ll
@@ -53,7 +53,7 @@ for.body:                                         ; preds = %entry, %for.body
   %1 = load float, ptr %arrayidx2, align 4
   %add = fadd float %1, %mul
   store float %add, ptr %arrayidx2, align 4
-  %2 = or i64 %indvars.iv, 1
+  %2 = or disjoint i64 %indvars.iv, 1
   %arrayidx5 = getelementptr inbounds float, ptr %b, i64 %2
   %3 = load float, ptr %arrayidx5, align 4
   %mul6 = fmul float %3, %alpha
@@ -61,7 +61,7 @@ for.body:                                         ; preds = %entry, %for.body
   %4 = load float, ptr %arrayidx9, align 4
   %add10 = fadd float %4, %mul6
   store float %add10, ptr %arrayidx9, align 4
-  %5 = or i64 %indvars.iv, 2
+  %5 = or disjoint i64 %indvars.iv, 2
   %arrayidx13 = getelementptr inbounds float, ptr %b, i64 %5
   %6 = load float, ptr %arrayidx13, align 4
   %mul14 = fmul float %6, %alpha
@@ -69,7 +69,7 @@ for.body:                                         ; preds = %entry, %for.body
   %7 = load float, ptr %arrayidx17, align 4
   %add18 = fadd float %7, %mul14
   store float %add18, ptr %arrayidx17, align 4
-  %8 = or i64 %indvars.iv, 3
+  %8 = or disjoint i64 %indvars.iv, 3
   %arrayidx21 = getelementptr inbounds float, ptr %b, i64 %8
   %9 = load float, ptr %arrayidx21, align 4
   %mul22 = fmul float %9, %alpha
@@ -77,7 +77,7 @@ for.body:                                         ; preds = %entry, %for.body
   %10 = load float, ptr %arrayidx25, align 4
   %add26 = fadd float %10, %mul22
   store float %add26, ptr %arrayidx25, align 4
-  %11 = or i64 %indvars.iv, 4
+  %11 = or disjoint i64 %indvars.iv, 4
   %arrayidx29 = getelementptr inbounds float, ptr %b, i64 %11
   %12 = load float, ptr %arrayidx29, align 4
   %mul30 = fmul float %12, %alpha
@@ -85,7 +85,7 @@ for.body:                                         ; preds = %entry, %for.body
   %13 = load float, ptr %arrayidx33, align 4
   %add34 = fadd float %13, %mul30
   store float %add34, ptr %arrayidx33, align 4
-  %14 = or i64 %indvars.iv, 5
+  %14 = or disjoint i64 %indvars.iv, 5
   %arrayidx37 = getelementptr inbounds float, ptr %b, i64 %14
   %15 = load float, ptr %arrayidx37, align 4
   %mul38 = fmul float %15, %alpha
@@ -93,7 +93,7 @@ for.body:                                         ; preds = %entry, %for.body
   %16 = load float, ptr %arrayidx41, align 4
   %add42 = fadd float %16, %mul38
   store float %add42, ptr %arrayidx41, align 4
-  %17 = or i64 %indvars.iv, 6
+  %17 = or disjoint i64 %indvars.iv, 6
   %arrayidx45 = getelementptr inbounds float, ptr %b, i64 %17
   %18 = load float, ptr %arrayidx45, align 4
   %mul46 = fmul float %18, %alpha
@@ -101,7 +101,7 @@ for.body:                                         ; preds = %entry, %for.body
   %19 = load float, ptr %arrayidx49, align 4
   %add50 = fadd float %19, %mul46
   store float %add50, ptr %arrayidx49, align 4
-  %20 = or i64 %indvars.iv, 7
+  %20 = or disjoint i64 %indvars.iv, 7
   %arrayidx53 = getelementptr inbounds float, ptr %b, i64 %20
   %21 = load float, ptr %arrayidx53, align 4
   %mul54 = fmul float %21, %alpha
@@ -109,7 +109,7 @@ for.body:                                         ; preds = %entry, %for.body
   %22 = load float, ptr %arrayidx57, align 4
   %add58 = fadd float %22, %mul54
   store float %add58, ptr %arrayidx57, align 4
-  %23 = or i64 %indvars.iv, 8
+  %23 = or disjoint i64 %indvars.iv, 8
   %arrayidx61 = getelementptr inbounds float, ptr %b, i64 %23
   %24 = load float, ptr %arrayidx61, align 4
   %mul62 = fmul float %24, %alpha
@@ -117,7 +117,7 @@ for.body:                                         ; preds = %entry, %for.body
   %25 = load float, ptr %arrayidx65, align 4
   %add66 = fadd float %25, %mul62
   store float %add66, ptr %arrayidx65, align 4
-  %26 = or i64 %indvars.iv, 9
+  %26 = or disjoint i64 %indvars.iv, 9
   %arrayidx69 = getelementptr inbounds float, ptr %b, i64 %26
   %27 = load float, ptr %arrayidx69, align 4
   %mul70 = fmul float %27, %alpha
@@ -125,7 +125,7 @@ for.body:                                         ; preds = %entry, %for.body
   %28 = load float, ptr %arrayidx73, align 4
   %add74 = fadd float %28, %mul70
   store float %add74, ptr %arrayidx73, align 4
-  %29 = or i64 %indvars.iv, 10
+  %29 = or disjoint i64 %indvars.iv, 10
   %arrayidx77 = getelementptr inbounds float, ptr %b, i64 %29
   %30 = load float, ptr %arrayidx77, align 4
   %mul78 = fmul float %30, %alpha
@@ -133,7 +133,7 @@ for.body:                                         ; preds = %entry, %for.body
   %31 = load float, ptr %arrayidx81, align 4
   %add82 = fadd float %31, %mul78
   store float %add82, ptr %arrayidx81, align 4
-  %32 = or i64 %indvars.iv, 11
+  %32 = or disjoint i64 %indvars.iv, 11
   %arrayidx85 = getelementptr inbounds float, ptr %b, i64 %32
   %33 = load float, ptr %arrayidx85, align 4
   %mul86 = fmul float %33, %alpha
@@ -141,7 +141,7 @@ for.body:                                         ; preds = %entry, %for.body
   %34 = load float, ptr %arrayidx89, align 4
   %add90 = fadd float %34, %mul86
   store float %add90, ptr %arrayidx89, align 4
-  %35 = or i64 %indvars.iv, 12
+  %35 = or disjoint i64 %indvars.iv, 12
   %arrayidx93 = getelementptr inbounds float, ptr %b, i64 %35
   %36 = load float, ptr %arrayidx93, align 4
   %mul94 = fmul float %36, %alpha
@@ -149,7 +149,7 @@ for.body:                                         ; preds = %entry, %for.body
   %37 = load float, ptr %arrayidx97, align 4
   %add98 = fadd float %37, %mul94
   store float %add98, ptr %arrayidx97, align 4
-  %38 = or i64 %indvars.iv, 13
+  %38 = or disjoint i64 %indvars.iv, 13
   %arrayidx101 = getelementptr inbounds float, ptr %b, i64 %38
   %39 = load float, ptr %arrayidx101, align 4
   %mul102 = fmul float %39, %alpha
@@ -157,7 +157,7 @@ for.body:                                         ; preds = %entry, %for.body
   %40 = load float, ptr %arrayidx105, align 4
   %add106 = fadd float %40, %mul102
   store float %add106, ptr %arrayidx105, align 4
-  %41 = or i64 %indvars.iv, 14
+  %41 = or disjoint i64 %indvars.iv, 14
   %arrayidx109 = getelementptr inbounds float, ptr %b, i64 %41
   %42 = load float, ptr %arrayidx109, align 4
   %mul110 = fmul float %42, %alpha
@@ -165,7 +165,7 @@ for.body:                                         ; preds = %entry, %for.body
   %43 = load float, ptr %arrayidx113, align 4
   %add114 = fadd float %43, %mul110
   store float %add114, ptr %arrayidx113, align 4
-  %44 = or i64 %indvars.iv, 15
+  %44 = or disjoint i64 %indvars.iv, 15
   %arrayidx117 = getelementptr inbounds float, ptr %b, i64 %44
   %45 = load float, ptr %arrayidx117, align 4
   %mul118 = fmul float %45, %alpha
@@ -173,7 +173,7 @@ for.body:                                         ; preds = %entry, %for.body
   %46 = load float, ptr %arrayidx121, align 4
   %add122 = fadd float %46, %mul118
   store float %add122, ptr %arrayidx121, align 4
-  %47 = or i64 %indvars.iv, 16
+  %47 = or disjoint i64 %indvars.iv, 16
   %arrayidx125 = getelementptr inbounds float, ptr %b, i64 %47
   %48 = load float, ptr %arrayidx125, align 4
   %mul126 = fmul float %48, %alpha
@@ -181,7 +181,7 @@ for.body:                                         ; preds = %entry, %for.body
   %49 = load float, ptr %arrayidx129, align 4
   %add130 = fadd float %49, %mul126
   store float %add130, ptr %arrayidx129, align 4
-  %50 = or i64 %indvars.iv, 17
+  %50 = or disjoint i64 %indvars.iv, 17
   %arrayidx133 = getelementptr inbounds float, ptr %b, i64 %50
   %51 = load float, ptr %arrayidx133, align 4
   %mul134 = fmul float %51, %alpha
@@ -189,7 +189,7 @@ for.body:                                         ; preds = %entry, %for.body
   %52 = load float, ptr %arrayidx137, align 4
   %add138 = fadd float %52, %mul134
   store float %add138, ptr %arrayidx137, align 4
-  %53 = or i64 %indvars.iv, 18
+  %53 = or disjoint i64 %indvars.iv, 18
   %arrayidx141 = getelementptr inbounds float, ptr %b, i64 %53
   %54 = load float, ptr %arrayidx141, align 4
   %mul142 = fmul float %54, %alpha
@@ -197,7 +197,7 @@ for.body:                                         ; preds = %entry, %for.body
   %55 = load float, ptr %arrayidx145, align 4
   %add146 = fadd float %55, %mul142
   store float %add146, ptr %arrayidx145, align 4
-  %56 = or i64 %indvars.iv, 19
+  %56 = or disjoint i64 %indvars.iv, 19
   %arrayidx149 = getelementptr inbounds float, ptr %b, i64 %56
   %57 = load float, ptr %arrayidx149, align 4
   %mul150 = fmul float %57, %alpha
@@ -205,7 +205,7 @@ for.body:                                         ; preds = %entry, %for.body
   %58 = load float, ptr %arrayidx153, align 4
   %add154 = fadd float %58, %mul150
   store float %add154, ptr %arrayidx153, align 4
-  %59 = or i64 %indvars.iv, 20
+  %59 = or disjoint i64 %indvars.iv, 20
   %arrayidx157 = getelementptr inbounds float, ptr %b, i64 %59
   %60 = load float, ptr %arrayidx157, align 4
   %mul158 = fmul float %60, %alpha
@@ -213,7 +213,7 @@ for.body:                                         ; preds = %entry, %for.body
   %61 = load float, ptr %arrayidx161, align 4
   %add162 = fadd float %61, %mul158
   store float %add162, ptr %arrayidx161, align 4
-  %62 = or i64 %indvars.iv, 21
+  %62 = or disjoint i64 %indvars.iv, 21
   %arrayidx165 = getelementptr inbounds float, ptr %b, i64 %62
   %63 = load float, ptr %arrayidx165, align 4
   %mul166 = fmul float %63, %alpha
@@ -221,7 +221,7 @@ for.body:                                         ; preds = %entry, %for.body
   %64 = load float, ptr %arrayidx169, align 4
   %add170 = fadd float %64, %mul166
   store float %add170, ptr %arrayidx169, align 4
-  %65 = or i64 %indvars.iv, 22
+  %65 = or disjoint i64 %indvars.iv, 22
   %arrayidx173 = getelementptr inbounds float, ptr %b, i64 %65
   %66 = load float, ptr %arrayidx173, align 4
   %mul174 = fmul float %66, %alpha
@@ -229,7 +229,7 @@ for.body:                                         ; preds = %entry, %for.body
   %67 = load float, ptr %arrayidx177, align 4
   %add178 = fadd float %67, %mul174
   store float %add178, ptr %arrayidx177, align 4
-  %68 = or i64 %indvars.iv, 23
+  %68 = or disjoint i64 %indvars.iv, 23
   %arrayidx181 = getelementptr inbounds float, ptr %b, i64 %68
   %69 = load float, ptr %arrayidx181, align 4
   %mul182 = fmul float %69, %alpha
@@ -237,7 +237,7 @@ for.body:                                         ; preds = %entry, %for.body
   %70 = load float, ptr %arrayidx185, align 4
   %add186 = fadd float %70, %mul182
   store float %add186, ptr %arrayidx185, align 4
-  %71 = or i64 %indvars.iv, 24
+  %71 = or disjoint i64 %indvars.iv, 24
   %arrayidx189 = getelementptr inbounds float, ptr %b, i64 %71
   %72 = load float, ptr %arrayidx189, align 4
   %mul190 = fmul float %72, %alpha
@@ -245,7 +245,7 @@ for.body:                                         ; preds = %entry, %for.body
   %73 = load float, ptr %arrayidx193, align 4
   %add194 = fadd float %73, %mul190
   store float %add194, ptr %arrayidx193, align 4
-  %74 = or i64 %indvars.iv, 25
+  %74 = or disjoint i64 %indvars.iv, 25
   %arrayidx197 = getelementptr inbounds float, ptr %b, i64 %74
   %75 = load float, ptr %arrayidx197, align 4
   %mul198 = fmul float %75, %alpha
@@ -253,7 +253,7 @@ for.body:                                         ; preds = %entry, %for.body
   %76 = load float, ptr %arrayidx201, align 4
   %add202 = fadd float %76, %mul198
   store float %add202, ptr %arrayidx201, align 4
-  %77 = or i64 %indvars.iv, 26
+  %77 = or disjoint i64 %indvars.iv, 26
   %arrayidx205 = getelementptr inbounds float, ptr %b, i64 %77
   %78 = load float, ptr %arrayidx205, align 4
   %mul206 = fmul float %78, %alpha
@@ -261,7 +261,7 @@ for.body:                                         ; preds = %entry, %for.body
   %79 = load float, ptr %arrayidx209, align 4
   %add210 = fadd float %79, %mul206
   store float %add210, ptr %arrayidx209, align 4
-  %80 = or i64 %indvars.iv, 27
+  %80 = or disjoint i64 %indvars.iv, 27
   %arrayidx213 = getelementptr inbounds float, ptr %b, i64 %80
   %81 = load float, ptr %arrayidx213, align 4
   %mul214 = fmul float %81, %alpha
@@ -269,7 +269,7 @@ for.body:                                         ; preds = %entry, %for.body
   %82 = load float, ptr %arrayidx217, align 4
   %add218 = fadd float %82, %mul214
   store float %add218, ptr %arrayidx217, align 4
-  %83 = or i64 %indvars.iv, 28
+  %83 = or disjoint i64 %indvars.iv, 28
   %arrayidx221 = getelementptr inbounds float, ptr %b, i64 %83
   %84 = load float, ptr %arrayidx221, align 4
   %mul222 = fmul float %84, %alpha
@@ -277,7 +277,7 @@ for.body:                                         ; preds = %entry, %for.body
   %85 = load float, ptr %arrayidx225, align 4
   %add226 = fadd float %85, %mul222
   store float %add226, ptr %arrayidx225, align 4
-  %86 = or i64 %indvars.iv, 29
+  %86 = or disjoint i64 %indvars.iv, 29
   %arrayidx229 = getelementptr inbounds float, ptr %b, i64 %86
   %87 = load float, ptr %arrayidx229, align 4
   %mul230 = fmul float %87, %alpha
@@ -285,7 +285,7 @@ for.body:                                         ; preds = %entry, %for.body
   %88 = load float, ptr %arrayidx233, align 4
   %add234 = fadd float %88, %mul230
   store float %add234, ptr %arrayidx233, align 4
-  %89 = or i64 %indvars.iv, 30
+  %89 = or disjoint i64 %indvars.iv, 30
   %arrayidx237 = getelementptr inbounds float, ptr %b, i64 %89
   %90 = load float, ptr %arrayidx237, align 4
   %mul238 = fmul float %90, %alpha
@@ -293,7 +293,7 @@ for.body:                                         ; preds = %entry, %for.body
   %91 = load float, ptr %arrayidx241, align 4
   %add242 = fadd float %91, %mul238
   store float %add242, ptr %arrayidx241, align 4
-  %92 = or i64 %indvars.iv, 31
+  %92 = or disjoint i64 %indvars.iv, 31
   %arrayidx245 = getelementptr inbounds float, ptr %b, i64 %92
   %93 = load float, ptr %arrayidx245, align 4
   %mul246 = fmul float %93, %alpha

diff --git a/llvm/test/Transforms/LoopReroll/indvar_with_ext.ll b/llvm/test/Transforms/LoopReroll/indvar_with_ext.ll
index e867972050a1c..3fcd43f1866a6 100644
--- a/llvm/test/Transforms/LoopReroll/indvar_with_ext.ll
+++ b/llvm/test/Transforms/LoopReroll/indvar_with_ext.ll
@@ -82,7 +82,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %0 = load i32, ptr %arrayidx, align 4
   %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
   store i32 %0, ptr %arrayidx3, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx5 = getelementptr inbounds i32, ptr %y, i64 %1
   %2 = load i32, ptr %arrayidx5, align 4
   %arrayidx8 = getelementptr inbounds i32, ptr %x, i64 %1
@@ -122,7 +122,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %0 = load i32, ptr %arrayidx, align 4
   %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %idxprom
   store i32 %0, ptr %arrayidx3, align 4
-  %add = or i32 %conv23, 1
+  %add = or disjoint i32 %conv23, 1
   %idxprom5 = sext i32 %add to i64
   %arrayidx6 = getelementptr inbounds i32, ptr %y, i64 %idxprom5
   %1 = load i32, ptr %arrayidx6, align 4
@@ -166,7 +166,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %0 = load i32, ptr %arrayidx, align 4
   %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
   store i32 %0, ptr %arrayidx3, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx5 = getelementptr inbounds i32, ptr %y, i64 %1
   %2 = load i32, ptr %arrayidx5, align 4
   %arrayidx8 = getelementptr inbounds i32, ptr %x, i64 %1

diff --git a/llvm/test/Transforms/LoopReroll/reduction.ll b/llvm/test/Transforms/LoopReroll/reduction.ll
index 4125716bfb5a8..94f4d53bfbf68 100644
--- a/llvm/test/Transforms/LoopReroll/reduction.ll
+++ b/llvm/test/Transforms/LoopReroll/reduction.ll
@@ -12,15 +12,15 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
   %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %r.029
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %1
   %2 = load i32, ptr %arrayidx3, align 4
   %add4 = add nsw i32 %add, %2
-  %3 = or i64 %indvars.iv, 2
+  %3 = or disjoint i64 %indvars.iv, 2
   %arrayidx7 = getelementptr inbounds i32, ptr %x, i64 %3
   %4 = load i32, ptr %arrayidx7, align 4
   %add8 = add nsw i32 %add4, %4
-  %5 = or i64 %indvars.iv, 3
+  %5 = or disjoint i64 %indvars.iv, 3
   %arrayidx11 = getelementptr inbounds i32, ptr %x, i64 %5
   %6 = load i32, ptr %arrayidx11, align 4
   %add12 = add nsw i32 %add8, %6
@@ -57,15 +57,15 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvars.iv
   %0 = load float, ptr %arrayidx, align 4
   %add = fadd float %0, %r.029
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx3 = getelementptr inbounds float, ptr %x, i64 %1
   %2 = load float, ptr %arrayidx3, align 4
   %add4 = fadd float %add, %2
-  %3 = or i64 %indvars.iv, 2
+  %3 = or disjoint i64 %indvars.iv, 2
   %arrayidx7 = getelementptr inbounds float, ptr %x, i64 %3
   %4 = load float, ptr %arrayidx7, align 4
   %add8 = fadd float %add4, %4
-  %5 = or i64 %indvars.iv, 3
+  %5 = or disjoint i64 %indvars.iv, 3
   %arrayidx11 = getelementptr inbounds float, ptr %x, i64 %5
   %6 = load float, ptr %arrayidx11, align 4
   %add12 = fadd float %add8, %6
@@ -102,15 +102,15 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
   %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %0
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx3 = getelementptr inbounds i32, ptr %x, i64 %1
   %2 = load i32, ptr %arrayidx3, align 4
   %add4 = add nsw i32 %add, %2
-  %3 = or i64 %indvars.iv, 2
+  %3 = or disjoint i64 %indvars.iv, 2
   %arrayidx7 = getelementptr inbounds i32, ptr %x, i64 %3
   %4 = load i32, ptr %arrayidx7, align 4
   %add8 = add nsw i32 %add4, %4
-  %5 = or i64 %indvars.iv, 3
+  %5 = or disjoint i64 %indvars.iv, 3
   %arrayidx11 = getelementptr inbounds i32, ptr %x, i64 %5
   %6 = load i32, ptr %arrayidx11, align 4
   %add12 = add nsw i32 %add8, %6

diff --git a/llvm/test/Transforms/LoopReroll/reroll_with_dbg.ll b/llvm/test/Transforms/LoopReroll/reroll_with_dbg.ll
index 32f9d72c5f664..e720e761f4d6c 100644
--- a/llvm/test/Transforms/LoopReroll/reroll_with_dbg.ll
+++ b/llvm/test/Transforms/LoopReroll/reroll_with_dbg.ll
@@ -46,17 +46,17 @@ for.body:                                         ; preds = %for.body.preheader,
   %0 = load i32, ptr %arrayidx, align 4, !dbg !30, !tbaa !33
   %arrayidx1 = getelementptr inbounds float, ptr %a, i32 %i.031, !dbg !37
   store i32 %0, ptr %arrayidx1, align 4, !dbg !38, !tbaa !33
-  %add = or i32 %i.031, 1, !dbg !39
+  %add = or disjoint i32 %i.031, 1, !dbg !39
   %arrayidx2 = getelementptr inbounds float, ptr %b, i32 %add, !dbg !40
   %1 = load i32, ptr %arrayidx2, align 4, !dbg !40, !tbaa !33
   %arrayidx4 = getelementptr inbounds float, ptr %a, i32 %add, !dbg !41
   store i32 %1, ptr %arrayidx4, align 4, !dbg !42, !tbaa !33
-  %add5 = or i32 %i.031, 2, !dbg !43
+  %add5 = or disjoint i32 %i.031, 2, !dbg !43
   %arrayidx6 = getelementptr inbounds float, ptr %b, i32 %add5, !dbg !44
   %2 = load i32, ptr %arrayidx6, align 4, !dbg !44, !tbaa !33
   %arrayidx8 = getelementptr inbounds float, ptr %a, i32 %add5, !dbg !45
   store i32 %2, ptr %arrayidx8, align 4, !dbg !46, !tbaa !33
-  %add9 = or i32 %i.031, 3, !dbg !47
+  %add9 = or disjoint i32 %i.031, 3, !dbg !47
   %arrayidx10 = getelementptr inbounds float, ptr %b, i32 %add9, !dbg !48
   %3 = load i32, ptr %arrayidx10, align 4, !dbg !48, !tbaa !33
   %arrayidx12 = getelementptr inbounds float, ptr %a, i32 %add9, !dbg !49

diff --git a/llvm/test/Transforms/LoopStrengthReduce/ARM/complexity.ll b/llvm/test/Transforms/LoopStrengthReduce/ARM/complexity.ll
index 9ad65439f5a43..1b64ade50f219 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ARM/complexity.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ARM/complexity.ll
@@ -54,7 +54,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us = sext i16 %tmp10 to i32
   %mul.us.us = mul nsw i32 %conv17.us.us, %conv.us.us
   %add18.us.us = add nsw i32 %mul.us.us, %result_element.152.us.us
-  %inc.us.us = or i32 %filter_x.053.us.us, 1
+  %inc.us.us = or disjoint i32 %filter_x.053.us.us, 1
   %add13.us.us.1 = add i32 %inc.us.us, %res_x.060.us
   %arrayidx14.us.us.1 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us
   %tmp11 = load i16, ptr %arrayidx14.us.us.1, align 2
@@ -64,7 +64,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us.1 = sext i16 %tmp12 to i32
   %mul.us.us.1 = mul nsw i32 %conv17.us.us.1, %conv.us.us.1
   %add18.us.us.1 = add nsw i32 %mul.us.us.1, %add18.us.us
-  %inc.us.us.1 = or i32 %filter_x.053.us.us, 2
+  %inc.us.us.1 = or disjoint i32 %filter_x.053.us.us, 2
   %add13.us.us.2 = add i32 %inc.us.us.1, %res_x.060.us
   %arrayidx14.us.us.2 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us.1
   %tmp13 = load i16, ptr %arrayidx14.us.us.2, align 2
@@ -74,7 +74,7 @@ for.body12.us.us:                                 ; preds = %for.body12.us.us, %
   %conv17.us.us.2 = sext i16 %tmp14 to i32
   %mul.us.us.2 = mul nsw i32 %conv17.us.us.2, %conv.us.us.2
   %add18.us.us.2 = add nsw i32 %mul.us.us.2, %add18.us.us.1
-  %inc.us.us.2 = or i32 %filter_x.053.us.us, 3
+  %inc.us.us.2 = or disjoint i32 %filter_x.053.us.us, 3
   %add13.us.us.3 = add i32 %inc.us.us.2, %res_x.060.us
   %arrayidx14.us.us.3 = getelementptr inbounds i16, ptr %tmp5, i32 %inc.us.us.2
   %tmp15 = load i16, ptr %arrayidx14.us.us.3, align 2

diff --git a/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index 07debadf34f52..1614de8dbf558 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -245,7 +245,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3 = trunc i32 %add to i8
   %arrayidx4 = getelementptr inbounds i8, ptr %c, i32 %i.07
   store i8 %conv3, ptr %arrayidx4, align 1
-  %inc1 = or i32 %i.07, 1
+  %inc1 = or disjoint i32 %i.07, 1
   %arrayidx.1 = getelementptr inbounds i8, ptr %a, i32 %inc1
   %2 = load i8, ptr %arrayidx.1, align 1
   %conv5.1 = zext i8 %2 to i32
@@ -256,7 +256,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3.1 = trunc i32 %add.1 to i8
   %arrayidx4.1 = getelementptr inbounds i8, ptr %c, i32 %inc1
   store i8 %conv3.1, ptr %arrayidx4.1, align 1
-  %inc.12 = or i32 %i.07, 2
+  %inc.12 = or disjoint i32 %i.07, 2
   %arrayidx.2 = getelementptr inbounds i8, ptr %a, i32 %inc.12
   %4 = load i8, ptr %arrayidx.2, align 1
   %conv5.2 = zext i8 %4 to i32
@@ -267,7 +267,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3.2 = trunc i32 %add.2 to i8
   %arrayidx4.2 = getelementptr inbounds i8, ptr %c, i32 %inc.12
   store i8 %conv3.2, ptr %arrayidx4.2, align 1
-  %inc.23 = or i32 %i.07, 3
+  %inc.23 = or disjoint i32 %i.07, 3
   %arrayidx.3 = getelementptr inbounds i8, ptr %a, i32 %inc.23
   %6 = load i8, ptr %arrayidx.3, align 1
   %conv5.3 = zext i8 %6 to i32

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index d618b6365fac9..39e2d6f1acca4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -385,7 +385,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3 = trunc i32 %add to i8
   %arrayidx4 = getelementptr inbounds i8, ptr %c, i32 %i.07
   store i8 %conv3, ptr %arrayidx4, align 1
-  %inc1 = or i32 %i.07, 1
+  %inc1 = or disjoint i32 %i.07, 1
   %arrayidx.1 = getelementptr inbounds i8, ptr %a, i32 %inc1
   %2 = load i8, ptr %arrayidx.1, align 1
   %conv5.1 = zext i8 %2 to i32
@@ -396,7 +396,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3.1 = trunc i32 %add.1 to i8
   %arrayidx4.1 = getelementptr inbounds i8, ptr %c, i32 %inc1
   store i8 %conv3.1, ptr %arrayidx4.1, align 1
-  %inc.12 = or i32 %i.07, 2
+  %inc.12 = or disjoint i32 %i.07, 2
   %arrayidx.2 = getelementptr inbounds i8, ptr %a, i32 %inc.12
   %4 = load i8, ptr %arrayidx.2, align 1
   %conv5.2 = zext i8 %4 to i32
@@ -407,7 +407,7 @@ for.body:                                         ; preds = %for.body, %entry
   %conv3.2 = trunc i32 %add.2 to i8
   %arrayidx4.2 = getelementptr inbounds i8, ptr %c, i32 %inc.12
   store i8 %conv3.2, ptr %arrayidx4.2, align 1
-  %inc.23 = or i32 %i.07, 3
+  %inc.23 = or disjoint i32 %i.07, 3
   %arrayidx.3 = getelementptr inbounds i8, ptr %a, i32 %inc.23
   %6 = load i8, ptr %arrayidx.3, align 1
   %conv5.3 = zext i8 %6 to i32
@@ -478,7 +478,7 @@ for.body:
   %p = phi ptr [ %p.next, %for.body ], [ %a, %entry ]
   %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
   store i32 %i, ptr %p, align 4
-  %inc1 = or i32 %i, 1
+  %inc1 = or disjoint i32 %i, 1
   %add.ptr.i1 = getelementptr inbounds i32, ptr %p, i32 1
   store i32 %inc1, ptr %add.ptr.i1, align 4
   %inc2 = add nsw i32 %i, 2

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/missing-phi-operand-update.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/missing-phi-operand-update.ll
index 83f30ad33431e..b13503543d6ee 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/missing-phi-operand-update.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/missing-phi-operand-update.ll
@@ -13,8 +13,8 @@ target triple = "x86_64-unknown-linux-gnu"
 ; All the other PHI inputs besides %tmp1 go to a new phi node.
 ; This test checks that LSR is still able to rewrite %tmp2, %tmp3, %tmp4.
 define i32 @foo(ptr %A, i32 %t) {
-; CHECK-LABEL: define i32 @foo
-; CHECK-SAME: (ptr [[A:%.*]], i32 [[T:%.*]]) {
+; CHECK-LABEL: define i32 @foo(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[T:%.*]]) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP_32:%.*]]
 ; CHECK:       loop.exit.loopexitsplitsplitsplit:
@@ -131,7 +131,7 @@ for.end:                                          ; preds = %then.8.1, %ifmerge.
 loop.32:                                          ; preds = %ifmerge.46, %entry
   %i1.i64.0 = phi i64 [ 0, %entry ], [ %nextivloop.32, %ifmerge.46 ]
   %tmp1 = shl i64 %i1.i64.0, 2
-  %tmp2 = or i64 %tmp1, 1
+  %tmp2 = or disjoint i64 %tmp1, 1
   %arrayIdx = getelementptr inbounds i32, ptr %A, i64 %tmp2
   %gepload = load i32, ptr %arrayIdx, align 4
   %cmp.34 = icmp sgt i32 %gepload, %t
@@ -144,7 +144,7 @@ then.34:                                          ; preds = %loop.32
   br i1 %cmp.35, label %loop.exit, label %ifmerge.34
 
 ifmerge.34:                                       ; preds = %then.34, %loop.32
-  %tmp3 = or i64 %tmp1, 2
+  %tmp3 = or disjoint i64 %tmp1, 2
   %arrayIdx19 = getelementptr inbounds i32, ptr %A, i64 %tmp3
   %gepload20 = load i32, ptr %arrayIdx19, align 4
   %cmp.38 = icmp sgt i32 %gepload20, %t
@@ -153,7 +153,7 @@ ifmerge.34:                                       ; preds = %then.34, %loop.32
   br i1 %or.cond, label %loop.exit, label %ifmerge.38
 
 ifmerge.38:                                       ; preds = %ifmerge.34
-  %tmp4 = or i64 %tmp1, 3
+  %tmp4 = or disjoint i64 %tmp1, 3
   %arrayIdx23 = getelementptr inbounds i32, ptr %A, i64 %tmp4
   %gepload24 = load i32, ptr %arrayIdx23, align 4
   %cmp.42 = icmp sgt i32 %gepload24, %t

diff --git a/llvm/test/Transforms/LoopUnroll/X86/high-cost-expansion.ll b/llvm/test/Transforms/LoopUnroll/X86/high-cost-expansion.ll
index be7547c097412..d5d4319b041a2 100644
--- a/llvm/test/Transforms/LoopUnroll/X86/high-cost-expansion.ll
+++ b/llvm/test/Transforms/LoopUnroll/X86/high-cost-expansion.ll
@@ -7,7 +7,7 @@ define void @mask-high(i64 %arg, ptr dereferenceable(4) %arg1) {
 ; CHECK-NEXT:    [[I:%.*]] = load i32, ptr [[ARG1:%.*]], align 4
 ; CHECK-NEXT:    [[I2:%.*]] = sext i32 [[I]] to i64
 ; CHECK-NEXT:    [[I3:%.*]] = and i64 [[ARG:%.*]], -16
-; CHECK-NEXT:    [[I4:%.*]] = or i64 1, [[I3]]
+; CHECK-NEXT:    [[I4:%.*]] = or disjoint i64 1, [[I3]]
 ; CHECK-NEXT:    [[I5:%.*]] = icmp sgt i64 [[I4]], [[I2]]
 ; CHECK-NEXT:    br i1 [[I5]], label [[BB10:%.*]], label [[BB6_PREHEADER:%.*]]
 ; CHECK:       bb6.preheader:
@@ -26,7 +26,7 @@ bb:
   %i = load i32, ptr %arg1, align 4
   %i2 = sext i32 %i to i64
   %i3 = and i64 %arg, -16
-  %i4 = or i64 1, %i3
+  %i4 = or disjoint i64 1, %i3
   %i5 = icmp sgt i64 %i4, %i2
   br i1 %i5, label %bb10, label %bb6
 

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index f045f486eac27..2e994838ff241 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -556,7 +556,7 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
 ; CHECK-NOT-VECTORIZED-NEXT:    [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
 ; CHECK-NOT-VECTORIZED-NEXT:    [[TMP0:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
 ; CHECK-NOT-VECTORIZED-NEXT:    [[ADD1]] = fadd float [[TMP0]], [[ADD_PHI2]]
-; CHECK-NOT-VECTORIZED-NEXT:    [[OR:%.*]] = or i64 [[IV]], 1
+; CHECK-NOT-VECTORIZED-NEXT:    [[OR:%.*]] = or disjoint i64 [[IV]], 1
 ; CHECK-NOT-VECTORIZED-NEXT:    [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
 ; CHECK-NOT-VECTORIZED-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
 ; CHECK-NOT-VECTORIZED-NEXT:    [[ADD2]] = fadd float [[TMP1]], [[ADD_PHI1]]
@@ -628,7 +628,7 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
 ; CHECK-UNORDERED-NEXT:    [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
 ; CHECK-UNORDERED-NEXT:    [[TMP21:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
 ; CHECK-UNORDERED-NEXT:    [[ADD1]] = fadd float [[TMP21]], [[ADD_PHI2]]
-; CHECK-UNORDERED-NEXT:    [[OR:%.*]] = or i64 [[IV]], 1
+; CHECK-UNORDERED-NEXT:    [[OR:%.*]] = or disjoint i64 [[IV]], 1
 ; CHECK-UNORDERED-NEXT:    [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
 ; CHECK-UNORDERED-NEXT:    [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
 ; CHECK-UNORDERED-NEXT:    [[ADD2]] = fadd float [[TMP22]], [[ADD_PHI1]]
@@ -696,7 +696,7 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
 ; CHECK-ORDERED-NEXT:    [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
 ; CHECK-ORDERED-NEXT:    [[TMP17:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
 ; CHECK-ORDERED-NEXT:    [[ADD1]] = fadd float [[TMP17]], [[ADD_PHI2]]
-; CHECK-ORDERED-NEXT:    [[OR:%.*]] = or i64 [[IV]], 1
+; CHECK-ORDERED-NEXT:    [[OR:%.*]] = or disjoint i64 [[IV]], 1
 ; CHECK-ORDERED-NEXT:    [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
 ; CHECK-ORDERED-NEXT:    [[TMP18:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
 ; CHECK-ORDERED-NEXT:    [[ADD2]] = fadd float [[TMP18]], [[ADD_PHI1]]
@@ -776,7 +776,7 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
 ; CHECK-ORDERED-TF-NEXT:    [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
 ; CHECK-ORDERED-TF-NEXT:    [[TMP26:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
 ; CHECK-ORDERED-TF-NEXT:    [[ADD1]] = fadd float [[TMP26]], [[ADD_PHI2]]
-; CHECK-ORDERED-TF-NEXT:    [[OR:%.*]] = or i64 [[IV]], 1
+; CHECK-ORDERED-TF-NEXT:    [[OR:%.*]] = or disjoint i64 [[IV]], 1
 ; CHECK-ORDERED-TF-NEXT:    [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]]
 ; CHECK-ORDERED-TF-NEXT:    [[TMP27:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4
 ; CHECK-ORDERED-TF-NEXT:    [[ADD2]] = fadd float [[TMP27]], [[ADD_PHI1]]
@@ -807,7 +807,7 @@ for.body:
   %arrayidxb1 = getelementptr inbounds float, ptr %b, i64 %iv
   %0 = load float, ptr %arrayidxb1, align 4
   %add1 = fadd float %0, %add.phi2
-  %or = or i64 %iv, 1
+  %or = or disjoint i64 %iv, 1
   %arrayidxb2 = getelementptr inbounds float, ptr %b, i64 %or
   %1 = load float, ptr %arrayidxb2, align 4
   %add2 = fadd float %1, %add.phi1

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
index 852a5adf5d0e2..33b5273217cdf 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
@@ -315,7 +315,7 @@ for.body:
   %arrayidxb1 = getelementptr inbounds float, ptr %b, i64 %iv
   %0 = load float, ptr %arrayidxb1, align 4
   %add1 = fadd float %0, %add.phi2
-  %or = or i64 %iv, 1
+  %or = or disjoint i64 %iv, 1
   %arrayidxb2 = getelementptr inbounds float, ptr %b, i64 %or
   %1 = load float, ptr %arrayidxb2, align 4
   %add2 = fadd float %1, %add.phi1

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 27b0c95873514..b09960f29f00b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -67,7 +67,7 @@ for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx0 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %indvars.iv
   %load1 = load i32, i32* %arrayidx0, align 4
-  %or = or i64 %indvars.iv, 1
+  %or = or disjoint i64 %indvars.iv, 1
   %arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %or
   %load2 = load i32, i32* %arrayidx1, align 4
   %add = add nsw i32 %load1, %C
@@ -156,7 +156,7 @@ for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds [1024 x i16], [1024 x i16]* @AB_i16, i64 0, i64 %indvars.iv
   %0 = load i16, i16* %arrayidx, align 2
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds [1024 x i16], [1024 x i16]* @AB_i16, i64 0, i64 %1
   %2 = load i16, i16* %arrayidx2, align 2
   %conv = sext i16 %0 to i32
@@ -248,7 +248,7 @@ for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %indvars.iv
   %0 = load i32, i32* %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @AB, i64 0, i64 %1
   %2 = load i32, i32* %arrayidx2, align 4
   %add3 = add nsw i32 %0, %C
@@ -752,7 +752,7 @@ for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
   %load1 = load i32, i32* %arrayidx, align 4
-  %or = or i64 %indvars.iv, 1
+  %or = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %or
   %load2 = load i32, i32* %arrayidx2, align 4
   %mul = mul nsw i32 %load2, %load1
@@ -1491,17 +1491,17 @@ define void @PR34743(i16* %a, i32* %b, i64 %n) #1 {
 ; CHECK-NEXT:    [[TMP16:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP17:%.*]] = add nuw nsw <vscale x 4 x i64> [[VEC_IND]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP16]]
-; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP18]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> poison), !alias.scope !34
+; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP18]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> poison), !alias.scope [[META34:![0-9]+]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
 ; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i16, ptr [[A]], <vscale x 4 x i64> [[TMP17]]
-; CHECK-NEXT:    [[WIDE_MASKED_GATHER4]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> poison), !alias.scope !34
+; CHECK-NEXT:    [[WIDE_MASKED_GATHER4]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP20]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> poison), !alias.scope [[META34]]
 ; CHECK-NEXT:    [[TMP21:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> [[VECTOR_RECUR]], <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]], i32 -1)
 ; CHECK-NEXT:    [[TMP22:%.*]] = sext <vscale x 4 x i16> [[TMP21]] to <vscale x 4 x i32>
 ; CHECK-NEXT:    [[TMP23:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER4]] to <vscale x 4 x i32>
 ; CHECK-NEXT:    [[TMP24:%.*]] = mul nsw <vscale x 4 x i32> [[TMP22]], [[TMP19]]
 ; CHECK-NEXT:    [[TMP25:%.*]] = mul nsw <vscale x 4 x i32> [[TMP24]], [[TMP23]]
 ; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP25]], ptr [[TMP26]], align 4, !alias.scope !37, !noalias !34
+; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP25]], ptr [[TMP26]], align 4, !alias.scope [[META37:![0-9]+]], !noalias [[META34]]
 ; CHECK-NEXT:    [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP28:%.*]] = shl nuw nsw i64 [[TMP27]], 2
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP28]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index ae3abba27be5f..56e2fe6e15b70 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -179,7 +179,7 @@ if.then:
   %mul = shl nuw nsw i32 %ix.024, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx4 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx4, align 1
   %cmp.i = icmp slt i8 %0, %1
@@ -345,7 +345,7 @@ for.body:
   br i1 %cmp1, label %if.then, label %for.inc
 
 if.then:
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx3 = getelementptr inbounds i8, ptr %q, i32 %add
   store i8 2, ptr %arrayidx3, align 1
   br label %for.inc
@@ -532,7 +532,7 @@ if.end:
   br i1 %cmp4, label %if.then6, label %for.inc
 
 if.then6:
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx7 = getelementptr inbounds i8, ptr %q, i32 %add
   store i8 2, ptr %arrayidx7, align 1
   br label %for.inc

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll
index e3f7cd88848e2..1dfa7f8fe18b9 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll
@@ -328,7 +328,7 @@ for.body:                                         ; preds = %entry, %for.body
   %mul1 = mul nuw nsw i64 %i.021, 3
   %arrayidx2 = getelementptr inbounds float, ptr %dst, i64 %mul1
   store float %0, ptr %arrayidx2, align 4
-  %add = or i64 %mul, 1
+  %add = or disjoint i64 %mul, 1
   %arrayidx4 = getelementptr inbounds float, ptr %src, i64 %add
   %1 = load float, ptr %arrayidx4, align 4
   %add6 = add nuw nsw i64 %mul1, 1

diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index 794c1364356c9..abea0899a601c 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -1380,7 +1380,7 @@ entry:
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
   %red.phi = phi i32 [ %red.2, %for.body ], [ 0, %entry ]
-  %add = or i32 %iv, 1
+  %add = or disjoint i32 %iv, 1
   %gep.0 = getelementptr inbounds i32, ptr %arr, i32 %add
   %l.0 = load i32, ptr %gep.0, align 4
   %gep.1 = getelementptr inbounds i32, ptr %arr, i32 %iv

diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
index 2467d3acbc6c7..1e029ba159044 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll
@@ -181,7 +181,7 @@ for.body:                                         ; preds = %for.body, %entry
   %0 = shl nsw i64 %indvars.iv, 1
   %arrayidx = getelementptr inbounds i32, ptr %b, i64 %0
   %1 = load i32, ptr %arrayidx, align 4
-  %2 = or i64 %0, 1
+  %2 = or disjoint i64 %0, 1
   %arrayidx3 = getelementptr inbounds i32, ptr %b, i64 %2
   %3 = load i32, ptr %arrayidx3, align 4
   %add4 = add nsw i32 %3, %1

diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll
index 7ace7fd2c099d..4fa65af14270f 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll
@@ -61,7 +61,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; SSE2-NEXT:    [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
 ; SSE2-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP18]] to i32
 ; SSE2-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[CONV5]], [[CONV]]
-; SSE2-NEXT:    [[TMP19:%.*]] = or i64 [[TMP16]], 1
+; SSE2-NEXT:    [[TMP19:%.*]] = or disjoint i64 [[TMP16]], 1
 ; SSE2-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[S1]], i64 [[TMP19]]
 ; SSE2-NEXT:    [[TMP20:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
 ; SSE2-NEXT:    [[CONV11:%.*]] = sext i16 [[TMP20]] to i32
@@ -74,7 +74,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; SSE2-NEXT:    store i32 [[ADD18]], ptr [[ARRAYIDX20]], align 4
 ; SSE2-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; SSE2-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; SSE2-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; SSE2-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; SSE2:       for.end.loopexit:
 ; SSE2-NEXT:    br label [[FOR_END]]
 ; SSE2:       for.end:
@@ -157,7 +157,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; SSE41-NEXT:    [[TMP33:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
 ; SSE41-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP33]] to i32
 ; SSE41-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[CONV5]], [[CONV]]
-; SSE41-NEXT:    [[TMP34:%.*]] = or i64 [[TMP31]], 1
+; SSE41-NEXT:    [[TMP34:%.*]] = or disjoint i64 [[TMP31]], 1
 ; SSE41-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[S1]], i64 [[TMP34]]
 ; SSE41-NEXT:    [[TMP35:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
 ; SSE41-NEXT:    [[CONV11:%.*]] = sext i16 [[TMP35]] to i32
@@ -170,7 +170,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; SSE41-NEXT:    store i32 [[ADD18]], ptr [[ARRAYIDX20]], align 4
 ; SSE41-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; SSE41-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; SSE41-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; SSE41-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; SSE41:       for.end.loopexit:
 ; SSE41-NEXT:    br label [[FOR_END]]
 ; SSE41:       for.end:
@@ -297,7 +297,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; AVX1-NEXT:    [[TMP63:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
 ; AVX1-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP63]] to i32
 ; AVX1-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[CONV5]], [[CONV]]
-; AVX1-NEXT:    [[TMP64:%.*]] = or i64 [[TMP61]], 1
+; AVX1-NEXT:    [[TMP64:%.*]] = or disjoint i64 [[TMP61]], 1
 ; AVX1-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[S1]], i64 [[TMP64]]
 ; AVX1-NEXT:    [[TMP65:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
 ; AVX1-NEXT:    [[CONV11:%.*]] = sext i16 [[TMP65]] to i32
@@ -310,7 +310,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; AVX1-NEXT:    store i32 [[ADD18]], ptr [[ARRAYIDX20]], align 4
 ; AVX1-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; AVX1-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; AVX1-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; AVX1-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; AVX1:       for.end.loopexit:
 ; AVX1-NEXT:    br label [[FOR_END]]
 ; AVX1:       for.end:
@@ -371,7 +371,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; AVX2-NEXT:    [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
 ; AVX2-NEXT:    [[CONV5:%.*]] = sext i16 [[TMP18]] to i32
 ; AVX2-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[CONV5]], [[CONV]]
-; AVX2-NEXT:    [[TMP19:%.*]] = or i64 [[TMP16]], 1
+; AVX2-NEXT:    [[TMP19:%.*]] = or disjoint i64 [[TMP16]], 1
 ; AVX2-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, ptr [[S1]], i64 [[TMP19]]
 ; AVX2-NEXT:    [[TMP20:%.*]] = load i16, ptr [[ARRAYIDX10]], align 2
 ; AVX2-NEXT:    [[CONV11:%.*]] = sext i16 [[TMP20]] to i32
@@ -384,7 +384,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon
 ; AVX2-NEXT:    store i32 [[ADD18]], ptr [[ARRAYIDX20]], align 4
 ; AVX2-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; AVX2-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; AVX2-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; AVX2-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; AVX2:       for.end.loopexit:
 ; AVX2-NEXT:    br label [[FOR_END]]
 ; AVX2:       for.end:
@@ -408,7 +408,7 @@ for.body:
   %2 = load i16, ptr %arrayidx4, align 2
   %conv5 = sext i16 %2 to i32
   %mul6 = mul nsw i32 %conv5, %conv
-  %3 = or i64 %0, 1
+  %3 = or disjoint i64 %0, 1
   %arrayidx10 = getelementptr inbounds i16, ptr %s1, i64 %3
   %4 = load i16, ptr %arrayidx10, align 2
   %conv11 = sext i16 %4 to i32

diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index 631738b0bd67e..d82d9a2d10cd8 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -64,7 +64,7 @@ for.body.us:                                      ; preds = %for.body.us.prehead
 
 for.body5.us.us48.preheader:                      ; preds = %for.body.us
   store i32 8, ptr %arraydecay.us.us.us, align 16
-  %indvars.iv.next66 = or i64 %indvars.iv70, 1
+  %indvars.iv.next66 = or disjoint i64 %indvars.iv70, 1
   %6 = add nsw i64 %4, %indvars.iv.next66
   %arraydecay.us.us55.1 = getelementptr inbounds [10 x i32], ptr %add.ptr.us, i64 %6, i64 0
   store i32 8, ptr %arraydecay.us.us55.1, align 8
@@ -72,7 +72,7 @@ for.body5.us.us48.preheader:                      ; preds = %for.body.us
 
 for.body5.us.us.us.preheader:                     ; preds = %for.body.us
   store i32 7, ptr %arraydecay.us.us.us, align 16
-  %indvars.iv.next73 = or i64 %indvars.iv70, 1
+  %indvars.iv.next73 = or disjoint i64 %indvars.iv70, 1
   %7 = add nsw i64 %4, %indvars.iv.next73
   %arraydecay.us.us.us.1 = getelementptr inbounds [10 x i32], ptr %add.ptr.us, i64 %7, i64 0
   store i32 7, ptr %arraydecay.us.us.us.1, align 8
@@ -101,7 +101,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %9 = add nsw i64 %8, %indvars.iv87
   %arraydecay.us31 = getelementptr inbounds [10 x i32], ptr %add.ptr, i64 %9, i64 0
   store i32 8, ptr %arraydecay.us31, align 16
-  %indvars.iv.next90 = or i64 %indvars.iv87, 1
+  %indvars.iv.next90 = or disjoint i64 %indvars.iv87, 1
   %10 = add nsw i64 %8, %indvars.iv.next90
   %arraydecay.us31.1 = getelementptr inbounds [10 x i32], ptr %add.ptr, i64 %10, i64 0
   store i32 8, ptr %arraydecay.us31.1, align 8

diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
index 95b337944853c..aced26d5e29fe 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
@@ -1446,7 +1446,7 @@ if.then:
   %mul = shl nuw nsw i32 %ix.024, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx4 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx4, align 1
   %cmp.i = icmp slt i8 %0, %1
@@ -2148,7 +2148,7 @@ if.then:
   %mul = shl nuw nsw i32 %ix.024, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx4 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx4, align 1
   %cmp.i = icmp slt i8 %0, %1
@@ -2585,7 +2585,7 @@ if.then:
   %mul = shl nuw nsw i32 %ix.023, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx3 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx3, align 1
   %cmp.i = icmp slt i8 %0, %1
@@ -3014,7 +3014,7 @@ for.body:
   %mul = shl nuw nsw i32 %ix.021, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx2 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx2, align 1
   %cmp.i = icmp slt i8 %0, %1

diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
index 53eab9c649b42..eda074fe84455 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
@@ -103,7 +103,7 @@ for.body:
   store i16 %0, ptr %arrayidx2, align 2
   %arrayidx4 = getelementptr inbounds i16, ptr %y, i64 %indvars.iv
   %2 = load i16, ptr %arrayidx4, align 2
-  %3 = or i64 %1, 1
+  %3 = or disjoint i64 %1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %3
   store i16 %2, ptr %arrayidx7, align 2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -283,7 +283,7 @@ for.body:
   store i16 %0, ptr %arrayidx2, align 2
   %arrayidx4 = getelementptr inbounds i16, ptr %y, i64 %indvars.iv
   %2 = load i16, ptr %arrayidx4, align 2
-  %3 = or i64 %1, 1
+  %3 = or disjoint i64 %1, 1
   %arrayidx7 = getelementptr inbounds i16, ptr %points, i64 %3
   store i16 %2, ptr %arrayidx7, align 2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-masked-group.ll
index 7d335a8d56f6f..cd4d2d0055ede 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-masked-group.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-masked-group.ll
@@ -127,7 +127,7 @@ if.then:
   %mul = shl nuw nsw i32 %ix.024, 1
   %arrayidx = getelementptr inbounds i8, ptr %p, i32 %mul
   %0 = load i8, ptr %arrayidx, align 1
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx4 = getelementptr inbounds i8, ptr %p, i32 %add
   %1 = load i8, ptr %arrayidx4, align 1
   %cmp.i = icmp slt i8 %0, %1
@@ -163,7 +163,7 @@ for.body:
   br i1 %cmp1, label %if.then, label %for.inc
 
 if.then:
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx3 = getelementptr inbounds i8, ptr %q, i32 %add
   store i8 2, ptr %arrayidx3, align 1
   br label %for.inc
@@ -200,7 +200,7 @@ if.end:
   br i1 %cmp4, label %if.then6, label %for.inc
 
 if.then6:
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx7 = getelementptr inbounds i8, ptr %q, i32 %add
   store i8 2, ptr %arrayidx7, align 1
   br label %for.inc

diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 71a1af78f7c89..187eefbe9b595 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -64,7 +64,7 @@ for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx0 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %indvars.iv
   %tmp = load i32, ptr %arrayidx0, align 4
-  %tmp1 = or i64 %indvars.iv, 1
+  %tmp1 = or disjoint i64 %indvars.iv, 1
   %arrayidx1 = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 %tmp1
   %tmp2 = load i32, ptr %arrayidx1, align 4
   %add = add nsw i32 %tmp, %C
@@ -700,7 +700,7 @@ for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
   %tmp = load i32, ptr %arrayidx, align 4
-  %tmp1 = or i64 %indvars.iv, 1
+  %tmp1 = or disjoint i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %A, i64 %tmp1
   %tmp2 = load i32, ptr %arrayidx2, align 4
   %mul = mul nsw i32 %tmp2, %tmp
@@ -1510,7 +1510,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) {
 ; CHECK-NEXT:    [[TMP12:%.*]] = mul nsw <4 x i32> [[TMP9]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = mul nsw <4 x i32> [[TMP12]], [[TMP11]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT:    store <4 x i32> [[TMP13]], ptr [[TMP14]], align 4, !alias.scope !36, !noalias !39
+; CHECK-NEXT:    store <4 x i32> [[TMP13]], ptr [[TMP14]], align 4, !alias.scope [[META36:![0-9]+]], !noalias [[META39:![0-9]+]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]

diff --git a/llvm/test/Transforms/LoopVectorize/pr39099.ll b/llvm/test/Transforms/LoopVectorize/pr39099.ll
index 9c2eb3d1ebe4e..ff1e46e35607f 100644
--- a/llvm/test/Transforms/LoopVectorize/pr39099.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr39099.ll
@@ -27,7 +27,7 @@ if.then:
   %arrayidx4 = getelementptr inbounds i8, ptr %q, i32 %mul
   store i8 %0, ptr %arrayidx4, align 1
   %sub = sub i8 0, %0
-  %add = or i32 %mul, 1
+  %add = or disjoint i32 %mul, 1
   %arrayidx8 = getelementptr inbounds i8, ptr %q, i32 %add
   store i8 %sub, ptr %arrayidx8, align 1
   br label %for.inc

diff --git a/llvm/test/Transforms/LoopVectorize/reduction-with-invariant-store.ll b/llvm/test/Transforms/LoopVectorize/reduction-with-invariant-store.ll
index 1cbe9d70aec2f..3d951da9feb27 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-with-invariant-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-with-invariant-store.ll
@@ -233,7 +233,7 @@ for.body:
   %0 = load i32, ptr %gep.src, align 4
   %sum.1 = add nsw i32 %0, %sum
   store i32 %sum.1, ptr %gep.dst, align 4
-  %1 = or i64 %iv, 1
+  %1 = or disjoint i64 %iv, 1
   %gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
   %2 = load i32, ptr %gep.src.1, align 4
   %sum.2 = add nsw i32 %2, %sum.1
@@ -302,7 +302,7 @@ for.body:
   %0 = load i32, ptr %arrayidx, align 4
   %sum.1 = add nsw i32 %0, %sum
   store i32 %sum.1, ptr %gep.dst, align 4
-  %1 = or i64 %iv, 1
+  %1 = or disjoint i64 %iv, 1
   %arrayidx4 = getelementptr inbounds i32, ptr %src, i64 %1
   %2 = load i32, ptr %arrayidx4, align 4
   %sum.2 = add nsw i32 %2, %sum.1
@@ -349,7 +349,7 @@ predicated:                                       ; preds = %for.body
   br label %latch
 
 latch:                                            ; preds = %predicated, %for.body
-  %1 = or i64 %iv, 1
+  %1 = or disjoint i64 %iv, 1
   %gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
   %2 = load i32, ptr %gep.src.1, align 4
   %sum.2 = add nsw i32 %2, %sum.1
@@ -384,7 +384,7 @@ for.body:                                         ; preds = %latch, %entry
   %0 = load i32, ptr %arrayidx, align 4
   %sum.1 = add nsw i32 %0, %sum
   store i32 %sum.1, ptr %gep.dst, align 4
-  %1 = or i64 %iv, 1
+  %1 = or disjoint i64 %iv, 1
   %gep.src.1 = getelementptr inbounds i32, ptr %src, i64 %1
   %2 = load i32, ptr %gep.src.1, align 4
   %sum.2 = add nsw i32 %2, %sum.1

diff --git a/llvm/test/Transforms/LoopVectorize/unroll_nonlatch.ll b/llvm/test/Transforms/LoopVectorize/unroll_nonlatch.ll
index ba02ddd1e5e23..b721d2184bcc1 100644
--- a/llvm/test/Transforms/LoopVectorize/unroll_nonlatch.ll
+++ b/llvm/test/Transforms/LoopVectorize/unroll_nonlatch.ll
@@ -16,23 +16,23 @@ define void @test(ptr %data) {
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw nsw i64 [[INDUCTION]], 1
-; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[INDUCTION1]], 1
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP0]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP1]], 1
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds double, ptr [[DATA:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[TMP4]], align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[TMP5]], align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = fneg double [[TMP6]]
-; CHECK-NEXT:    [[TMP9:%.*]] = fneg double [[TMP7]]
-; CHECK-NEXT:    store double [[TMP8]], ptr [[TMP4]], align 8
-; CHECK-NEXT:    store double [[TMP9]], ptr [[TMP5]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP0]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = or disjoint i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = or disjoint i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds double, ptr [[DATA:%.*]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load double, ptr [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load double, ptr [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = fneg double [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = fneg double [[TMP9]]
+; CHECK-NEXT:    store double [[TMP10]], ptr [[TMP6]], align 8
+; CHECK-NEXT:    store double [[TMP11]], ptr [[TMP7]], align 8
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1022
-; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1022
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    br label [[SCALAR_PH]]
 ; CHECK:       scalar.ph:
@@ -45,12 +45,12 @@ define void @test(ptr %data) {
 ; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_LATCH]]
 ; CHECK:       for.latch:
 ; CHECK-NEXT:    [[T15:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[T16:%.*]] = or i64 [[T15]], 1
+; CHECK-NEXT:    [[T16:%.*]] = or disjoint i64 [[T15]], 1
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[T16]]
 ; CHECK-NEXT:    [[T17:%.*]] = load double, ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    [[FNEG:%.*]] = fneg double [[T17]]
 ; CHECK-NEXT:    store double [[FNEG]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    br label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT:    br label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -65,7 +65,7 @@ for.body:
 
 for.latch:
   %t15 = shl nuw nsw i64 %indvars.iv, 1
-  %t16 = or i64 %t15, 1
+  %t16 = or disjoint i64 %t15, 1
   %arrayidx = getelementptr inbounds double, ptr %data, i64 %t16
   %t17 = load double, ptr %arrayidx, align 8
   %fneg = fneg double %t17

diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
index 70eb21fcbf123..efe8bd9e610bb 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
@@ -205,7 +205,7 @@ entry:
   %idxprom7 = sext i32 %mul to i64
   %arrayidx8 = getelementptr inbounds i16, ptr %x, i64 %idxprom7
   %4 = load i16, ptr %arrayidx8, align 2
-  %add10 = or i32 %mul, 1
+  %add10 = or disjoint i32 %mul, 1
   %idxprom11 = sext i32 %add10 to i64
   %arrayidx12 = getelementptr inbounds i16, ptr %x, i64 %idxprom11
   %5 = load i16, ptr %arrayidx12, align 2
@@ -754,7 +754,7 @@ entry:
   %idxprom11 = sext i32 %mul to i64
   %arrayidx12 = getelementptr inbounds i32, ptr %x, i64 %idxprom11
   %5 = load i32, ptr %arrayidx12, align 4
-  %add14 = or i32 %mul, 1
+  %add14 = or disjoint i32 %mul, 1
   %idxprom15 = sext i32 %add14 to i64
   %arrayidx16 = getelementptr inbounds i32, ptr %x, i64 %idxprom15
   %6 = load i32, ptr %arrayidx16, align 4

diff --git a/llvm/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll b/llvm/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
index 712e847a49b17..5132e49181169 100644
--- a/llvm/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
+++ b/llvm/test/Transforms/SLPVectorizer/NVPTX/v2f16.ll
@@ -10,10 +10,10 @@ define void @fusion(ptr noalias nocapture align 256 dereferenceable(19267584) %a
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds half, ptr [[ARG1:%.*]], i64 [[TMP6]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds half, ptr [[ARG:%.*]], i64 [[TMP6]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x half>, ptr [[TMP11]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <2 x half> [[TMP2]], <half 0xH5380, half 0xH5380>
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <2 x half> [[TMP3]], <half 0xH57F0, half 0xH57F0>
-; CHECK-NEXT:    store <2 x half> [[TMP4]], ptr [[TMP16]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x half>, ptr [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <2 x half> [[TMP1]], <half 0xH5380, half 0xH5380>
+; CHECK-NEXT:    [[TMP3:%.*]] = fadd fast <2 x half> [[TMP2]], <half 0xH57F0, half 0xH57F0>
+; CHECK-NEXT:    store <2 x half> [[TMP3]], ptr [[TMP16]], align 8
 ; CHECK-NEXT:    ret void
 ;
 ; NOVECTOR-LABEL: @fusion(
@@ -21,7 +21,7 @@ define void @fusion(ptr noalias nocapture align 256 dereferenceable(19267584) %a
 ; NOVECTOR-NEXT:    [[TMP4:%.*]] = or i32 [[TMP]], [[ARG3:%.*]]
 ; NOVECTOR-NEXT:    [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 2
 ; NOVECTOR-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
-; NOVECTOR-NEXT:    [[TMP7:%.*]] = or i64 [[TMP6]], 1
+; NOVECTOR-NEXT:    [[TMP7:%.*]] = or disjoint i64 [[TMP6]], 1
 ; NOVECTOR-NEXT:    [[TMP11:%.*]] = getelementptr inbounds half, ptr [[ARG1:%.*]], i64 [[TMP6]]
 ; NOVECTOR-NEXT:    [[TMP12:%.*]] = load half, ptr [[TMP11]], align 8
 ; NOVECTOR-NEXT:    [[TMP13:%.*]] = fmul fast half [[TMP12]], 0xH5380
@@ -40,7 +40,7 @@ define void @fusion(ptr noalias nocapture align 256 dereferenceable(19267584) %a
   %tmp4 = or i32 %tmp, %arg3
   %tmp5 = shl nuw nsw i32 %tmp4, 2
   %tmp6 = zext i32 %tmp5 to i64
-  %tmp7 = or i64 %tmp6, 1
+  %tmp7 = or disjoint i64 %tmp6, 1
   %tmp11 = getelementptr inbounds half, ptr %arg1, i64 %tmp6
   %tmp12 = load half, ptr %tmp11, align 8
   %tmp13 = fmul fast half %tmp12, 0xH5380

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
index 2b27a7aed1be2..22cba328b180a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -11,14 +11,14 @@ define void @reduce_compare(ptr nocapture %A, i32 %n) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
 ; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[SHUFFLE]], [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP4]], <double 7.000000e+00, double 4.000000e+00>
 ; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> [[TMP5]], <double 5.000000e+00, double 9.000000e+00>
 ; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP6]], i32 0
@@ -48,7 +48,7 @@ for.body:                                         ; preds = %for.inc, %entry
   %mul1 = fmul double %conv, %1
   %mul2 = fmul double %mul1, 7.000000e+00
   %add = fadd double %mul2, 5.000000e+00
-  %2 = or i64 %0, 1
+  %2 = or disjoint i64 %0, 1
   %arrayidx6 = getelementptr inbounds double, ptr %A, i64 %2
   %3 = load double, ptr %arrayidx6, align 8
   %mul8 = fmul double %conv, %3

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
index 63eb5a4e07ba1..2e733930650ea 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -18,17 +18,17 @@ define void @foo_3double(i32 %u) #0 {
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], ptr @A, i32 0, i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], ptr @B, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    store <2 x double> [[TMP4]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
 ; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x double], ptr @A, i32 0, i64 [[IDXPROM25]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load double, ptr [[ARRAYIDX26]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load double, ptr [[ARRAYIDX26]], align 8
 ; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x double], ptr @B, i32 0, i64 [[IDXPROM25]]
-; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[ARRAYIDX30]], align 8
-; CHECK-NEXT:    [[ADD31:%.*]] = fadd double [[TMP6]], [[TMP7]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load double, ptr [[ARRAYIDX30]], align 8
+; CHECK-NEXT:    [[ADD31:%.*]] = fadd double [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    store double [[ADD31]], ptr [[ARRAYIDX26]], align 8
 ; CHECK-NEXT:    ret void
 ;
@@ -75,10 +75,10 @@ define void @foo_2double(i32 %u) #0 {
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], ptr @A, i32 0, i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], ptr @B, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    store <2 x double> [[TMP4]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -114,10 +114,10 @@ define void @foo_4float(i32 %u) #0 {
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x float], ptr @C, i32 0, i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x float], ptr @D, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    store <4 x float> [[TMP4]], ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <4 x float> [[TMP2]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -181,11 +181,11 @@ define i32 @foo_loop(ptr %A, i32 %n) #0 {
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], 2
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
-; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP3]], i32 1
+; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP4]], [[TMP5]]
 ; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
 ; CHECK-NEXT:    store double [[ADD7]], ptr [[SUM]], align 8
 ; CHECK-NEXT:    [[INC]] = add nsw i32 [[TMP0]], 1
@@ -259,10 +259,10 @@ define void @foo_2double_non_power_of_2(i32 %u) #0 {
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD6]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], ptr @A, i32 0, i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], ptr @B, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    store <2 x double> [[TMP4]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -300,10 +300,10 @@ define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[ADD6]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], ptr @A, i32 0, i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], ptr @B, i32 0, i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
-; CHECK-NEXT:    store <2 x double> [[TMP4]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -355,11 +355,11 @@ define i32 @foo_loop_non_power_of_2(ptr %A, i32 %n) #0 {
 ; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 [[MUL]], 5
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD_5]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
-; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP3]], i32 1
+; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP4]], [[TMP5]]
 ; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
 ; CHECK-NEXT:    store double [[ADD7]], ptr [[SUM]], align 8
 ; CHECK-NEXT:    [[INC]] = add i32 [[TMP0]], 1
@@ -445,18 +445,18 @@ define double @bar(ptr nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]]
 ; CHECK:       for.cond.cleanup:
-; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x double> [[TMP0]], i32 0
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    ret double [[MUL]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_018:%.*]] = phi i32 [ [[ADD5:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x double> [ [[TMP6]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x double> [ [[TMP5]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ]
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_018]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP6]] = fadd <2 x double> [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[TMP5]] = fadd <2 x double> [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    [[ADD5]] = add i32 [[I_018]], 2
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD5]], [[N]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
@@ -479,7 +479,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds double, ptr %a, i64 %idxprom
   %0 = load double, ptr %arrayidx, align 8
   %add = fadd double %x.016, %0
-  %add1 = or i32 %i.018, 1
+  %add1 = or disjoint i32 %i.018, 1
   %idxprom2 = zext i32 %add1 to i64
   %arrayidx3 = getelementptr inbounds double, ptr %a, i64 %idxprom2
   %1 = load double, ptr %arrayidx3, align 8

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll b/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
index 8290cfee3c3c8..65ede4baf6519 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
@@ -18,13 +18,13 @@ define i32 @foo(ptr nocapture %A, i32 %n, i32 %k) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i32 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[K:%.*]], i32 1
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_024:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD10:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_024]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD10]] = add nsw i32 [[I_024]], 4
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD10]], 10000
@@ -41,17 +41,17 @@ for.body:                                         ; preds = %entry, %for.body
   %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %n
   store i32 %add, ptr %arrayidx, align 4
-  %add121 = or i32 %i.024, 1
+  %add121 = or disjoint i32 %i.024, 1
   %arrayidx2 = getelementptr inbounds i32, ptr %A, i32 %add121
   %1 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %1, %k
   store i32 %add3, ptr %arrayidx2, align 4
-  %add422 = or i32 %i.024, 2
+  %add422 = or disjoint i32 %i.024, 2
   %arrayidx5 = getelementptr inbounds i32, ptr %A, i32 %add422
   %2 = load i32, ptr %arrayidx5, align 4
   %add6 = add nsw i32 %2, %n
   store i32 %add6, ptr %arrayidx5, align 4
-  %add723 = or i32 %i.024, 3
+  %add723 = or disjoint i32 %i.024, 3
   %arrayidx8 = getelementptr inbounds i32, ptr %A, i32 %add723
   %3 = load i32, ptr %arrayidx8, align 4
   %add9 = add nsw i32 %3, %k

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
index 107237a8348f2..197ffd11b245f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -28,10 +28,10 @@ define i32 @add_red(ptr %A, i32 %n) {
 ; CHECK-NEXT:    [[SUM_032:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD17:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[I_033]], 2
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul <4 x float> [[TMP2]], <float 7.000000e+00, float 7.000000e+00, float 7.000000e+00, float 7.000000e+00>
-; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
-; CHECK-NEXT:    [[ADD17]] = fadd fast float [[SUM_032]], [[TMP4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul <4 x float> [[TMP1]], <float 7.000000e+00, float 7.000000e+00, float 7.000000e+00, float 7.000000e+00>
+; CHECK-NEXT:    [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
+; CHECK-NEXT:    [[ADD17]] = fadd fast float [[SUM_032]], [[TMP3]]
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_033]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP0]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
@@ -57,17 +57,17 @@ for.body:
   %arrayidx = getelementptr inbounds float, ptr %A, i64 %mul
   %1 = load float, ptr %arrayidx, align 4
   %mul2 = fmul float %1, 7.000000e+00
-  %add28 = or i64 %mul, 1
+  %add28 = or disjoint i64 %mul, 1
   %arrayidx4 = getelementptr inbounds float, ptr %A, i64 %add28
   %2 = load float, ptr %arrayidx4, align 4
   %mul5 = fmul float %2, 7.000000e+00
   %add6 = fadd fast float %mul2, %mul5
-  %add829 = or i64 %mul, 2
+  %add829 = or disjoint i64 %mul, 2
   %arrayidx9 = getelementptr inbounds float, ptr %A, i64 %add829
   %3 = load float, ptr %arrayidx9, align 4
   %mul10 = fmul float %3, 7.000000e+00
   %add11 = fadd fast float %add6, %mul10
-  %add1330 = or i64 %mul, 3
+  %add1330 = or disjoint i64 %mul, 3
   %arrayidx14 = getelementptr inbounds float, ptr %A, i64 %add1330
   %4 = load float, ptr %arrayidx14, align 4
   %mul15 = fmul float %4, 7.000000e+00
@@ -103,20 +103,20 @@ define i32 @mul_red(ptr noalias %A, ptr noalias %B, i32 %n) {
 ; CHECK-NEXT:    [[CMP38:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP38]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body.lr.ph:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[N]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[N]] to i64
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_040:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM_039:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[MUL21:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[I_040]], 2
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul <4 x float> [[TMP1]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT:    [[MUL21]] = fmul float [[SUM_039]], [[TMP6]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <4 x float> [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT:    [[MUL21]] = fmul float [[SUM_039]], [[TMP4]]
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_040]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP2]]
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
 ; CHECK:       for.cond.for.end_crit_edge:
 ; CHECK-NEXT:    [[PHITMP:%.*]] = fptosi float [[MUL21]] to i32
@@ -147,17 +147,17 @@ for.body:
   %arrayidx2 = getelementptr inbounds float, ptr %A, i64 %mul
   %5 = load float, ptr %arrayidx2, align 4
   %mul3 = fmul float %0, %5
-  %add35 = or i64 %mul, 1
+  %add35 = or disjoint i64 %mul, 1
   %arrayidx6 = getelementptr inbounds float, ptr %A, i64 %add35
   %6 = load float, ptr %arrayidx6, align 4
   %mul7 = fmul float %1, %6
   %add8 = fadd fast float %mul3, %mul7
-  %add1136 = or i64 %mul, 2
+  %add1136 = or disjoint i64 %mul, 2
   %arrayidx12 = getelementptr inbounds float, ptr %A, i64 %add1136
   %7 = load float, ptr %arrayidx12, align 4
   %mul13 = fmul float %2, %7
   %add14 = fadd fast float %add8, %mul13
-  %add1737 = or i64 %mul, 3
+  %add1737 = or disjoint i64 %mul, 3
   %arrayidx18 = getelementptr inbounds float, ptr %A, i64 %add1737
   %8 = load float, ptr %arrayidx18, align 4
   %mul19 = fmul float %3, %8
@@ -198,27 +198,27 @@ define i32 @long_red(ptr noalias %A, ptr noalias %B, i32 %n) {
 ; CHECK-NEXT:    [[CMP81:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP81]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body.lr.ph:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x float>, ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load float, ptr [[ARRAYIDX45]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = sext i32 [[N]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX45]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[N]] to i64
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_083:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM_082:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD51:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_083]], 6
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x float>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <8 x float> [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x float>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <8 x float> [[TMP0]], [[TMP3]]
 ; CHECK-NEXT:    [[ADD47:%.*]] = add nsw i64 [[MUL]], 8
 ; CHECK-NEXT:    [[ARRAYIDX48:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[ADD47]]
-; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARRAYIDX48]], align 4
-; CHECK-NEXT:    [[MUL49:%.*]] = fmul fast float [[TMP2]], [[TMP7]]
-; CHECK-NEXT:    [[TMP8:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP6]])
-; CHECK-NEXT:    [[OP_RDX:%.*]] = fadd fast float [[TMP8]], [[MUL49]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[ARRAYIDX48]], align 4
+; CHECK-NEXT:    [[MUL49:%.*]] = fmul fast float [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP4]])
+; CHECK-NEXT:    [[OP_RDX:%.*]] = fadd fast float [[TMP6]], [[MUL49]]
 ; CHECK-NEXT:    [[ADD51]] = fadd fast float [[SUM_082]], [[OP_RDX]]
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_083]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP3]]
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP2]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
 ; CHECK:       for.cond.for.end_crit_edge:
 ; CHECK-NEXT:    [[PHITMP:%.*]] = fptosi float [[ADD51]] to i32
@@ -259,7 +259,7 @@ for.body:
   %arrayidx2 = getelementptr inbounds float, ptr %A, i64 %mul
   %10 = load float, ptr %arrayidx2, align 4
   %mul3 = fmul fast float %0, %10
-  %add80 = or i64 %mul, 1
+  %add80 = or disjoint i64 %mul, 1
   %arrayidx6 = getelementptr inbounds float, ptr %A, i64 %add80
   %11 = load float, ptr %arrayidx6, align 4
   %mul7 = fmul fast float %1, %11
@@ -330,20 +330,20 @@ define i32 @chain_red(ptr noalias %A, ptr noalias %B, i32 %n) {
 ; CHECK-NEXT:    [[CMP41:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP41]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body.lr.ph:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[N]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[N]] to i64
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_043:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM_042:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[I_043]], 2
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT:    [[OP_RDX]] = fadd fast float [[TMP6]], [[SUM_042]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <4 x float> [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT:    [[OP_RDX]] = fadd fast float [[TMP4]], [[SUM_042]]
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_043]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP2]]
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
 ; CHECK:       for.cond.for.end_crit_edge:
 ; CHECK-NEXT:    [[PHITMP:%.*]] = fptosi float [[OP_RDX]] to i32
@@ -375,17 +375,17 @@ for.body:
   %5 = load float, ptr %arrayidx2, align 4
   %mul3 = fmul fast float %0, %5
   %add = fadd fast float %sum.042, %mul3
-  %add638 = or i64 %mul, 1
+  %add638 = or disjoint i64 %mul, 1
   %arrayidx7 = getelementptr inbounds float, ptr %A, i64 %add638
   %6 = load float, ptr %arrayidx7, align 4
   %mul8 = fmul fast float %1, %6
   %add9 = fadd fast float %add, %mul8
-  %add1239 = or i64 %mul, 2
+  %add1239 = or disjoint i64 %mul, 2
   %arrayidx13 = getelementptr inbounds float, ptr %A, i64 %add1239
   %7 = load float, ptr %arrayidx13, align 4
   %mul14 = fmul fast float %2, %7
   %add15 = fadd fast float %add9, %mul14
-  %add1840 = or i64 %mul, 3
+  %add1840 = or disjoint i64 %mul, 3
   %arrayidx19 = getelementptr inbounds float, ptr %A, i64 %add1840
   %8 = load float, ptr %arrayidx19, align 4
   %mul20 = fmul fast float %3, %8
@@ -441,13 +441,13 @@ define void @foo(ptr nocapture readonly %arg_A, i32 %arg_B, ptr nocapture %array
 ; CHECK-NEXT:    [[TMP0:%.*]] = shl i64 [[INDVARS_IV]], 2
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[ARRAY:%.*]], i64 [[TMP0]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP0]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = or disjoint i64 [[TMP0]], 1
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[ARRAY]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = or i64 [[TMP0]], 2
+; CHECK-NEXT:    [[TMP4:%.*]] = or disjoint i64 [[TMP0]], 2
 ; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[ARRAY]], i64 [[TMP4]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = load float, ptr [[ARRAYIDX8]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = or i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP6:%.*]] = or disjoint i64 [[TMP0]], 3
 ; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[ARRAY]], i64 [[TMP6]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARRAYIDX12]], align 4
 ; CHECK-NEXT:    br i1 [[CMP1495]], label [[FOR_COND_CLEANUP15]], label [[FOR_BODY16_LR_PH:%.*]]
@@ -502,13 +502,13 @@ for.body:                                         ; preds = %for.cond.cleanup15,
   %0 = shl i64 %indvars.iv, 2
   %arrayidx = getelementptr inbounds float, ptr %array, i64 %0
   %1 = load float, ptr %arrayidx, align 4
-  %2 = or i64 %0, 1
+  %2 = or disjoint i64 %0, 1
   %arrayidx4 = getelementptr inbounds float, ptr %array, i64 %2
   %3 = load float, ptr %arrayidx4, align 4
-  %4 = or i64 %0, 2
+  %4 = or disjoint i64 %0, 2
   %arrayidx8 = getelementptr inbounds float, ptr %array, i64 %4
   %5 = load float, ptr %arrayidx8, align 4
-  %6 = or i64 %0, 3
+  %6 = or disjoint i64 %0, 3
   %arrayidx12 = getelementptr inbounds float, ptr %array, i64 %6
   %7 = load float, ptr %arrayidx12, align 4
   br i1 %cmp1495, label %for.cond.cleanup15, label %for.body16.lr.ph
@@ -569,22 +569,22 @@ define void @store_red_double(ptr noalias %A, ptr noalias %B, ptr noalias %C, i3
 ; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
 ; CHECK:       for.body.lr.ph:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[N]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[N]] to i64
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_018:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[I_018]], 2
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <2 x double> [[TMP1]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP5]], i32 0
-; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP5]], i32 1
-; CHECK-NEXT:    [[ADD8:%.*]] = fadd fast double [[TMP6]], [[TMP7]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <2 x double> [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP3]], i32 1
+; CHECK-NEXT:    [[ADD8:%.*]] = fadd fast double [[TMP4]], [[TMP5]]
 ; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds double, ptr [[C:%.*]], i64 [[I_018]]
 ; CHECK-NEXT:    store double [[ADD8]], ptr [[ARRAYIDX9]], align 8
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_018]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP2]]
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP1]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret void
@@ -606,7 +606,7 @@ for.body:
   %arrayidx2 = getelementptr inbounds double, ptr %A, i64 %mul
   %3 = load double, ptr %arrayidx2, align 8
   %mul3 = fmul fast double %0, %3
-  %add16 = or i64 %mul, 1
+  %add16 = or disjoint i64 %mul, 1
   %arrayidx6 = getelementptr inbounds double, ptr %A, i64 %add16
   %4 = load double, ptr %arrayidx6, align 8
   %mul7 = fmul fast double %1, %4
@@ -645,11 +645,11 @@ define i32 @store_red(ptr noalias %A, ptr noalias %B, ptr noalias %C, i32 %n) {
 ; CHECK-NEXT:    [[C_ADDR_038:%.*]] = phi ptr [ [[C:%.*]], [[FOR_BODY_LR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i64 [[I_039]], 2
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[MUL]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT:    store float [[TMP6]], ptr [[C_ADDR_038]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT:    store float [[TMP4]], ptr [[C_ADDR_038]], align 4
 ; CHECK-NEXT:    [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[C_ADDR_038]], i64 1
 ; CHECK-NEXT:    [[INC]] = add nsw i64 [[I_039]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP0]]
@@ -677,19 +677,19 @@ for.body:
   %2 = load float, ptr %arrayidx2, align 4
   %mul3 = fmul fast float %1, %2
   %3 = load float, ptr %arrayidx4, align 4
-  %add34 = or i64 %mul, 1
+  %add34 = or disjoint i64 %mul, 1
   %arrayidx6 = getelementptr inbounds float, ptr %A, i64 %add34
   %4 = load float, ptr %arrayidx6, align 4
   %mul7 = fmul fast float %3, %4
   %add8 = fadd fast float %mul3, %mul7
   %5 = load float, ptr %arrayidx9, align 4
-  %add1135 = or i64 %mul, 2
+  %add1135 = or disjoint i64 %mul, 2
   %arrayidx12 = getelementptr inbounds float, ptr %A, i64 %add1135
   %6 = load float, ptr %arrayidx12, align 4
   %mul13 = fmul fast float %5, %6
   %add14 = fadd fast float %add8, %mul13
   %7 = load float, ptr %arrayidx15, align 4
-  %add1736 = or i64 %mul, 3
+  %add1736 = or disjoint i64 %mul, 3
   %arrayidx18 = getelementptr inbounds float, ptr %A, i64 %add1736
   %8 = load float, ptr %arrayidx18, align 4
   %mul19 = fmul fast float %7, %8
@@ -1003,10 +1003,10 @@ define void @i32_red_invoke(i32 %val) personality ptr @__gxx_personality_v0 {
 ; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr @arr_i32, align 16
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP0]])
 ; CHECK-NEXT:    [[RES:%.*]] = invoke i32 @foobar(i32 [[TMP1]])
-; CHECK-NEXT:    to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
+; CHECK-NEXT:            to label [[NORMAL:%.*]] unwind label [[EXCEPTION:%.*]]
 ; CHECK:       exception:
 ; CHECK-NEXT:    [[CLEANUP:%.*]] = landingpad i8
-; CHECK-NEXT:    cleanup
+; CHECK-NEXT:            cleanup
 ; CHECK-NEXT:    br label [[NORMAL]]
 ; CHECK:       normal:
 ; CHECK-NEXT:    ret void
@@ -1041,11 +1041,11 @@ define i32 @reduction_result_used_in_phi(ptr nocapture readonly %data, i1 zeroex
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[BB:%.*]], label [[EXIT:%.*]]
 ; CHECK:       bb:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[DATA:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[DATA:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP0]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[SUM_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[BB]] ]
+; CHECK-NEXT:    [[SUM_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP1]], [[BB]] ]
 ; CHECK-NEXT:    ret i32 [[SUM_1]]
 ;
 entry:
@@ -1074,11 +1074,11 @@ define i32 @reduction_result_used_in_phi_loop(ptr nocapture readonly %data, i1 z
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[B:%.*]], label [[BB:%.*]], label [[EXIT:%.*]]
 ; CHECK:       bb:
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[DATA:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[DATA:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP0]])
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[SUM_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[BB]] ]
+; CHECK-NEXT:    [[SUM_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP1]], [[BB]] ]
 ; CHECK-NEXT:    ret i32 [[SUM_1]]
 ;
 entry:
@@ -1131,9 +1131,9 @@ bb.1:
 
 define float @fadd_v4f32_fmf(ptr %p) {
 ; CHECK-LABEL: @fadd_v4f32_fmf(
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = call reassoc nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
-; CHECK-NEXT:    ret float [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP1]])
+; CHECK-NEXT:    ret float [[TMP2]]
 ;
   %p1 = getelementptr inbounds float, float* %p, i64 1
   %p2 = getelementptr inbounds float, ptr %p, i64 2
@@ -1154,9 +1154,9 @@ define float @fadd_v4f32_fmf(ptr %p) {
 
 define float @fadd_v4f32_fmf_intersect(ptr %p) {
 ; CHECK-LABEL: @fadd_v4f32_fmf_intersect(
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = call reassoc ninf nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
-; CHECK-NEXT:    ret float [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[P:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc ninf nsz float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP1]])
+; CHECK-NEXT:    ret float [[TMP2]]
 ;
   %p1 = getelementptr inbounds float, float* %p, i64 1
   %p2 = getelementptr inbounds float, ptr %p, i64 2

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
index dcc012c9cd805..e4143a7b91168 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
@@ -12,14 +12,14 @@ define void @in_tree_user(ptr nocapture %A, i32 %n) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
 ; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[CONV]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[TMP0]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[SHUFFLE]], [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP4]], <double 7.000000e+00, double 4.000000e+00>
 ; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> [[TMP5]], <double 5.000000e+00, double 9.000000e+00>
 ; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[TMP6]], i32 0
@@ -52,7 +52,7 @@ for.body:                                         ; preds = %for.inc, %entry
   %mul2 = fmul double %mul1, 7.000000e+00
   %add = fadd double %mul2, 5.000000e+00
   %InTreeUser = fadd double %add, %add    ; <------------------ In tree user.
-  %2 = or i64 %0, 1
+  %2 = or disjoint i64 %0, 1
   %arrayidx6 = getelementptr inbounds double, ptr %A, i64 %2
   %3 = load double, ptr %arrayidx6, align 8
   %mul8 = fmul double %conv, %3

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
index 42068c9bc5d49..20b0885b22537 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -12,14 +12,14 @@ define i32 @foo(ptr nocapture %A, i32 %n) {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x i32> poison, i32 [[N]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <8 x i32> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x i32> poison, i32 [[N]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = add nsw <8 x i32> [[TMP0]], [[TMP2]]
 ; CHECK-NEXT:    store <8 x i32> [[TMP3]], ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
-; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[N]]
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP4]], [[N]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    ret i32 undef
@@ -34,37 +34,37 @@ for.body:
   %0 = load i32, ptr %arrayidx, align 4
   %add1 = add nsw i32 %0, %n
   store i32 %add1, ptr %arrayidx, align 4
-  %1 = or i64 %indvars.iv, 1
+  %1 = or disjoint i64 %indvars.iv, 1
   %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 %1
   %2 = load i32, ptr %arrayidx4, align 4
   %add5 = add nsw i32 %2, %n
   store i32 %add5, ptr %arrayidx4, align 4
-  %3 = or i64 %indvars.iv, 2
+  %3 = or disjoint i64 %indvars.iv, 2
   %arrayidx8 = getelementptr inbounds i32, ptr %A, i64 %3
   %4 = load i32, ptr %arrayidx8, align 4
   %add9 = add nsw i32 %4, %n
   store i32 %add9, ptr %arrayidx8, align 4
-  %5 = or i64 %indvars.iv, 3
+  %5 = or disjoint i64 %indvars.iv, 3
   %arrayidx12 = getelementptr inbounds i32, ptr %A, i64 %5
   %6 = load i32, ptr %arrayidx12, align 4
   %add13 = add nsw i32 %6, %n
   store i32 %add13, ptr %arrayidx12, align 4
-  %7 = or i64 %indvars.iv, 4
+  %7 = or disjoint i64 %indvars.iv, 4
   %arrayidx16 = getelementptr inbounds i32, ptr %A, i64 %7
   %8 = load i32, ptr %arrayidx16, align 4
   %add17 = add nsw i32 %8, %n
   store i32 %add17, ptr %arrayidx16, align 4
-  %9 = or i64 %indvars.iv, 5
+  %9 = or disjoint i64 %indvars.iv, 5
   %arrayidx20 = getelementptr inbounds i32, ptr %A, i64 %9
   %10 = load i32, ptr %arrayidx20, align 4
   %add21 = add nsw i32 %10, %n
   store i32 %add21, ptr %arrayidx20, align 4
-  %11 = or i64 %indvars.iv, 6
+  %11 = or disjoint i64 %indvars.iv, 6
   %arrayidx24 = getelementptr inbounds i32, ptr %A, i64 %11
   %12 = load i32, ptr %arrayidx24, align 4
   %add25 = add nsw i32 %12, %n
   store i32 %add25, ptr %arrayidx24, align 4
-  %13 = or i64 %indvars.iv, 7
+  %13 = or disjoint i64 %indvars.iv, 7
   %arrayidx28 = getelementptr inbounds i32, ptr %A, i64 %13
   %14 = load i32, ptr %arrayidx28, align 4
   %add29 = add nsw i32 %14, %n

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
index ceee30a3360e2..ce9158d8bf2ee 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -21,11 +21,11 @@ define i32 @reduce(ptr nocapture %A, i32 %n, i32 %m) {
 ; CHECK-NEXT:    [[SUM_014:%.*]] = phi double [ [[ADD6:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ]
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[I_015]], 1
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i32 [[MUL]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = fmul <2 x double> [[TMP1]], <double 7.000000e+00, double 7.000000e+00>
-; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
-; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <2 x double> [[TMP0]], <double 7.000000e+00, double 7.000000e+00>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
+; CHECK-NEXT:    [[ADD5:%.*]] = fadd double [[TMP2]], [[TMP3]]
 ; CHECK-NEXT:    [[ADD6]] = fadd double [[SUM_014]], [[ADD5]]
 ; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_015]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
@@ -48,7 +48,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx = getelementptr inbounds double, ptr %A, i32 %mul
   %0 = load double, ptr %arrayidx, align 4
   %mul1 = fmul double %0, 7.000000e+00
-  %add12 = or i32 %mul, 1
+  %add12 = or disjoint i32 %mul, 1
   %arrayidx3 = getelementptr inbounds double, ptr %A, i32 %add12
   %1 = load double, ptr %arrayidx3, align 4
   %mul4 = fmul double %1, 7.000000e+00
@@ -74,12 +74,12 @@ define i32 @horiz_max_multiple_uses(ptr %x, ptr %p) {
 ; CHECK-LABEL: @horiz_max_multiple_uses(
 ; CHECK-NEXT:    [[X4:%.*]] = getelementptr [32 x i32], ptr [[X:%.*]], i64 0, i64 4
 ; CHECK-NEXT:    [[X5:%.*]] = getelementptr [32 x i32], ptr [[X]], i64 0, i64 5
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[X]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[X]], align 4
 ; CHECK-NEXT:    [[T4:%.*]] = load i32, ptr [[X4]], align 4
 ; CHECK-NEXT:    [[T5:%.*]] = load i32, ptr [[X5]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT:    [[MAX_ROOT_CMP:%.*]] = icmp sgt i32 [[TMP3]], [[T4]]
-; CHECK-NEXT:    [[MAX_ROOT_SEL:%.*]] = select i1 [[MAX_ROOT_CMP]], i32 [[TMP3]], i32 [[T4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT:    [[MAX_ROOT_CMP:%.*]] = icmp sgt i32 [[TMP2]], [[T4]]
+; CHECK-NEXT:    [[MAX_ROOT_SEL:%.*]] = select i1 [[MAX_ROOT_CMP]], i32 [[TMP2]], i32 [[T4]]
 ; CHECK-NEXT:    [[C012345:%.*]] = icmp sgt i32 [[MAX_ROOT_SEL]], [[T5]]
 ; CHECK-NEXT:    [[T17:%.*]] = select i1 [[C012345]], i32 [[MAX_ROOT_SEL]], i32 [[T5]]
 ; CHECK-NEXT:    [[THREE_OR_FOUR:%.*]] = select i1 [[MAX_ROOT_CMP]], i32 3, i32 4

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
index b244715caa793..5dcd5d3190ad0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -35,7 +35,7 @@ define double @foo(ptr nocapture %D) {
   %4 = load double, ptr %3, align 4
   %A4 = fmul double %4, %4
   %A42 = fmul double %A4, %A4
-  %5 = or i32 %2, 1
+  %5 = or disjoint i32 %2, 1
   %6 = getelementptr inbounds double, ptr %D, i32 %5
   %7 = load double, ptr %6, align 4
   %A7 = fmul double %7, %7

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
index f657477ce8e52..f1cd42a2c404a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
@@ -10,17 +10,17 @@ define i32 @foo(ptr %diff) #0 {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP1]]
 ; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP4]]
-; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[ARRAYIDX6]], align 16
-; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
-; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP9]], [[A_088]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[ARRAYIDX6]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[A_088]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
@@ -37,7 +37,7 @@ for.body:                                         ; preds = %for.body, %entry
   %0 = shl i64 %indvars.iv, 3
   %arrayidx = getelementptr inbounds i32, ptr %diff, i64 %0
   %1 = load i32, ptr %arrayidx, align 4
-  %2 = or i64 %0, 4
+  %2 = or disjoint i64 %0, 4
   %arrayidx2 = getelementptr inbounds i32, ptr %diff, i64 %2
   %3 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %3, %1
@@ -45,10 +45,10 @@ for.body:                                         ; preds = %for.body, %entry
   store i32 %add3, ptr %arrayidx6, align 16
 
   %add10 = add nsw i32 %add3, %a.088
-  %4 = or i64 %0, 1
+  %4 = or disjoint i64 %0, 1
   %arrayidx13 = getelementptr inbounds i32, ptr %diff, i64 %4
   %5 = load i32, ptr %arrayidx13, align 4
-  %6 = or i64 %0, 5
+  %6 = or disjoint i64 %0, 5
   %arrayidx16 = getelementptr inbounds i32, ptr %diff, i64 %6
   %7 = load i32, ptr %arrayidx16, align 4
   %add17 = add nsw i32 %7, %5
@@ -56,10 +56,10 @@ for.body:                                         ; preds = %for.body, %entry
   store i32 %add17, ptr %arrayidx20, align 4
 
   %add24 = add nsw i32 %add10, %add17
-  %8 = or i64 %0, 2
+  %8 = or disjoint i64 %0, 2
   %arrayidx27 = getelementptr inbounds i32, ptr %diff, i64 %8
   %9 = load i32, ptr %arrayidx27, align 4
-  %10 = or i64 %0, 6
+  %10 = or disjoint i64 %0, 6
   %arrayidx30 = getelementptr inbounds i32, ptr %diff, i64 %10
   %11 = load i32, ptr %arrayidx30, align 4
   %add31 = add nsw i32 %11, %9
@@ -67,10 +67,10 @@ for.body:                                         ; preds = %for.body, %entry
   store i32 %add31, ptr %arrayidx34, align 8
 
   %add38 = add nsw i32 %add24, %add31
-  %12 = or i64 %0, 3
+  %12 = or disjoint i64 %0, 3
   %arrayidx41 = getelementptr inbounds i32, ptr %diff, i64 %12
   %13 = load i32, ptr %arrayidx41, align 4
-  %14 = or i64 %0, 7
+  %14 = or disjoint i64 %0, 7
   %arrayidx44 = getelementptr inbounds i32, ptr %diff, i64 %14
   %15 = load i32, ptr %arrayidx44, align 4
 

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll b/llvm/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
index 0a2cc76d2e25d..6d96d6d29cd59 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/remark_not_all_parts.ll
@@ -10,19 +10,19 @@ define i32 @foo(ptr nocapture readonly %diff) #0 {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD24:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP1]]
 ; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <2 x i32> [[TMP6]], [[TMP4]]
-; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i32> [[TMP7]], i32 0
-; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP8]], [[A_088]]
-; CHECK-NEXT:    store <2 x i32> [[TMP7]], ptr [[ARRAYIDX6]], align 16
-; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i32> [[TMP7]], i32 1
-; CHECK-NEXT:    [[ADD24]] = add nsw i32 [[ADD10]], [[TMP10]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <2 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i32> [[TMP4]], i32 0
+; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP5]], [[A_088]]
+; CHECK-NEXT:    store <2 x i32> [[TMP4]], ptr [[ARRAYIDX6]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[TMP4]], i32 1
+; CHECK-NEXT:    [[ADD24]] = add nsw i32 [[ADD10]], [[TMP6]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
@@ -39,17 +39,17 @@ for.body:                                         ; preds = %for.body, %entry
   %0 = shl i64 %indvars.iv, 3
   %arrayidx = getelementptr inbounds i32, ptr %diff, i64 %0
   %1 = load i32, ptr %arrayidx, align 4
-  %2 = or i64 %0, 4
+  %2 = or disjoint i64 %0, 4
   %arrayidx2 = getelementptr inbounds i32, ptr %diff, i64 %2
   %3 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %3, %1
   %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], ptr %m2, i64 0, i64 %indvars.iv, i64 0
   store i32 %add3, ptr %arrayidx6, align 16
   %add10 = add nsw i32 %add3, %a.088
-  %4 = or i64 %0, 1
+  %4 = or disjoint i64 %0, 1
   %arrayidx13 = getelementptr inbounds i32, ptr %diff, i64 %4
   %5 = load i32, ptr %arrayidx13, align 4
-  %6 = or i64 %0, 5
+  %6 = or disjoint i64 %0, 5
   %arrayidx16 = getelementptr inbounds i32, ptr %diff, i64 %6
   %7 = load i32, ptr %arrayidx16, align 4
   %add17 = add nsw i32 %7, %5

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
index 87d2a00341563..f682851b0406e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -9,17 +9,17 @@ define i32 @foo(ptr nocapture readonly %diff) #0 {
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[A_088:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[INDVARS_IV]], 3
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], 4
-; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i64 [[INDVARS_IV]], 3
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DIFF:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or disjoint i64 [[TMP0]], 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DIFF]], i64 [[TMP1]]
 ; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr [[M2]], i64 0, i64 [[INDVARS_IV]], i64 0
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP4]]
-; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[ARRAYIDX6]], align 16
-; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
-; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP9]], [[A_088]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr [[ARRAYIDX6]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
+; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[A_088]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
@@ -37,37 +37,37 @@ for.body:                                         ; preds = %for.body, %entry
   %0 = shl i64 %indvars.iv, 3
   %arrayidx = getelementptr inbounds i32, ptr %diff, i64 %0
   %1 = load i32, ptr %arrayidx, align 4
-  %2 = or i64 %0, 4
+  %2 = or disjoint i64 %0, 4
   %arrayidx2 = getelementptr inbounds i32, ptr %diff, i64 %2
   %3 = load i32, ptr %arrayidx2, align 4
   %add3 = add nsw i32 %3, %1
   %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], ptr %m2, i64 0, i64 %indvars.iv, i64 0
   store i32 %add3, ptr %arrayidx6, align 16
   %add10 = add nsw i32 %add3, %a.088
-  %4 = or i64 %0, 1
+  %4 = or disjoint i64 %0, 1
   %arrayidx13 = getelementptr inbounds i32, ptr %diff, i64 %4
   %5 = load i32, ptr %arrayidx13, align 4
-  %6 = or i64 %0, 5
+  %6 = or disjoint i64 %0, 5
   %arrayidx16 = getelementptr inbounds i32, ptr %diff, i64 %6
   %7 = load i32, ptr %arrayidx16, align 4
   %add17 = add nsw i32 %7, %5
   %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], ptr %m2, i64 0, i64 %indvars.iv, i64 1
   store i32 %add17, ptr %arrayidx20, align 4
   %add24 = add nsw i32 %add10, %add17
-  %8 = or i64 %0, 2
+  %8 = or disjoint i64 %0, 2
   %arrayidx27 = getelementptr inbounds i32, ptr %diff, i64 %8
   %9 = load i32, ptr %arrayidx27, align 4
-  %10 = or i64 %0, 6
+  %10 = or disjoint i64 %0, 6
   %arrayidx30 = getelementptr inbounds i32, ptr %diff, i64 %10
   %11 = load i32, ptr %arrayidx30, align 4
   %add31 = add nsw i32 %11, %9
   %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], ptr %m2, i64 0, i64 %indvars.iv, i64 2
   store i32 %add31, ptr %arrayidx34, align 8
   %add38 = add nsw i32 %add24, %add31
-  %12 = or i64 %0, 3
+  %12 = or disjoint i64 %0, 3
   %arrayidx41 = getelementptr inbounds i32, ptr %diff, i64 %12
   %13 = load i32, ptr %arrayidx41, align 4
-  %14 = or i64 %0, 7
+  %14 = or disjoint i64 %0, 7
   %arrayidx44 = getelementptr inbounds i32, ptr %diff, i64 %14
   %15 = load i32, ptr %arrayidx44, align 4
   %add45 = add nsw i32 %15, %13

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
index cc7b0dab26ed6..89c64d64b9c84 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -9,16 +9,16 @@ define i32 @rollable(ptr noalias nocapture %in, ptr noalias nocapture %out, i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
 ; CHECK:       .lr.ph:
-; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP10:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP8:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = mul <4 x i32> [[TMP6]], <i32 7, i32 7, i32 7, i32 7>
-; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP7]], <i32 7, i32 14, i32 21, i32 28>
-; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr [[TMP4]], align 4
-; CHECK-NEXT:    [[TMP10]] = add i64 [[I_019]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP10]], [[N]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = mul <4 x i32> [[TMP5]], <i32 7, i32 7, i32 7, i32 7>
+; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP6]], <i32 7, i32 14, i32 21, i32 28>
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP8]] = add i64 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP8]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
 ; CHECK:       ._crit_edge:
 ; CHECK-NEXT:    ret i32 undef
@@ -31,13 +31,13 @@ define i32 @rollable(ptr noalias nocapture %in, ptr noalias nocapture %out, i64
   %2 = shl i64 %i.019, 2
   %3 = getelementptr inbounds i32, ptr %in, i64 %2
   %4 = load i32, ptr %3, align 4
-  %5 = or i64 %2, 1
+  %5 = or disjoint i64 %2, 1
   %6 = getelementptr inbounds i32, ptr %in, i64 %5
   %7 = load i32, ptr %6, align 4
-  %8 = or i64 %2, 2
+  %8 = or disjoint i64 %2, 2
   %9 = getelementptr inbounds i32, ptr %in, i64 %8
   %10 = load i32, ptr %9, align 4
-  %11 = or i64 %2, 3
+  %11 = or disjoint i64 %2, 3
   %12 = getelementptr inbounds i32, ptr %in, i64 %11
   %13 = load i32, ptr %12, align 4
   %14 = mul i32 %4, 7
@@ -69,24 +69,24 @@ define i32 @unrollable(ptr %in, ptr %out, i64 %n) nounwind ssp uwtable {
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i64 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[TMP1]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH:%.*]]
 ; CHECK:       .lr.ph:
-; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP18:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
+; CHECK-NEXT:    [[I_019:%.*]] = phi i64 [ [[TMP14:%.*]], [[DOTLR_PH]] ], [ 0, [[TMP0:%.*]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[I_019]], 2
 ; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = or i64 [[TMP2]], 2
+; CHECK-NEXT:    [[TMP4:%.*]] = or disjoint i64 [[TMP2]], 2
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP4]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[OUT:%.*]], i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = mul <2 x i32> [[TMP8]], <i32 7, i32 7>
-; CHECK-NEXT:    [[TMP10:%.*]] = add <2 x i32> [[TMP9]], <i32 7, i32 14>
-; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[OUT]], i64 [[TMP4]]
-; CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = mul <2 x i32> [[TMP14]], <i32 7, i32 7>
-; CHECK-NEXT:    [[TMP16:%.*]] = add <2 x i32> [[TMP15]], <i32 21, i32 28>
-; CHECK-NEXT:    store <2 x i32> [[TMP10]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = mul <2 x i32> [[TMP7]], <i32 7, i32 7>
+; CHECK-NEXT:    [[TMP9:%.*]] = add <2 x i32> [[TMP8]], <i32 7, i32 14>
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[OUT]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load <2 x i32>, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = mul <2 x i32> [[TMP11]], <i32 7, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = add <2 x i32> [[TMP12]], <i32 21, i32 28>
+; CHECK-NEXT:    store <2 x i32> [[TMP9]], ptr [[TMP6]], align 4
 ; CHECK-NEXT:    [[BARRIER:%.*]] = call i32 @goo(i32 0)
-; CHECK-NEXT:    store <2 x i32> [[TMP16]], ptr [[TMP12]], align 4
-; CHECK-NEXT:    [[TMP18]] = add i64 [[I_019]], 1
-; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT:    store <2 x i32> [[TMP13]], ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP14]] = add i64 [[I_019]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[TMP14]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]]
 ; CHECK:       ._crit_edge:
 ; CHECK-NEXT:    ret i32 undef
@@ -99,13 +99,13 @@ define i32 @unrollable(ptr %in, ptr %out, i64 %n) nounwind ssp uwtable {
   %2 = shl i64 %i.019, 2
   %3 = getelementptr inbounds i32, ptr %in, i64 %2
   %4 = load i32, ptr %3, align 4
-  %5 = or i64 %2, 1
+  %5 = or disjoint i64 %2, 1
   %6 = getelementptr inbounds i32, ptr %in, i64 %5
   %7 = load i32, ptr %6, align 4
-  %8 = or i64 %2, 2
+  %8 = or disjoint i64 %2, 2
   %9 = getelementptr inbounds i32, ptr %in, i64 %8
   %10 = load i32, ptr %9, align 4
-  %11 = or i64 %2, 3
+  %11 = or disjoint i64 %2, 3
   %12 = getelementptr inbounds i32, ptr %in, i64 %11
   %13 = load i32, ptr %12, align 4
   %14 = mul i32 %4, 7

diff --git a/llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll b/llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
index 17dc47ae9b5a3..51ce40b7a178b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/slp-throttle.ll
@@ -20,7 +20,7 @@ define dso_local void @rftbsub(ptr %a) local_unnamed_addr #0 {
 entry:
   %arrayidx6 = getelementptr inbounds double, ptr %a, i64 2
   %0 = load double, ptr %arrayidx6, align 8
-  %1 = or i64 2, 1
+  %1 = or disjoint i64 2, 1
   %arrayidx12 = getelementptr inbounds double, ptr %a, i64 %1
   %2 = load double, ptr %arrayidx12, align 8
   %add16 = fadd double %2, undef

