[llvm] d783455 - Reland [GlobalISel] Start using vectors in GISelKnownBits

Petar Avramovic via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 4 12:47:31 PST 2021


Author: Petar Avramovic
Date: 2021-03-04T21:47:13+01:00
New Revision: d7834556b7ad3bbfc03ed5dd8d875d1d73c674c2

URL: https://github.com/llvm/llvm-project/commit/d7834556b7ad3bbfc03ed5dd8d875d1d73c674c2
DIFF: https://github.com/llvm/llvm-project/commit/d7834556b7ad3bbfc03ed5dd8d875d1d73c674c2.diff

LOG: Reland [GlobalISel] Start using vectors in GISelKnownBits

This is a recommit of 4c8fb7ddd6fa49258e0e9427e7345fb56ba522d4;
the MIR in one unit test had mismatched types.

For vectors we consider a bit known if it is the same for all demanded
vector elements (all elements by default). The KnownBits BitWidth for a
vector type is the size of a vector element. Add support for
G_BUILD_VECTOR; the per-element fold is sketched below.
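
A minimal standalone sketch of that fold, assuming llvm::KnownBits and a
hypothetical combineElements helper (the real code is the G_BUILD_VECTOR
case in GISelKnownBits::computeKnownBitsImpl in the diff below):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  // A bit is known for the vector only if every demanded element agrees.
  KnownBits combineElements(ArrayRef<KnownBits> Elts,
                            const APInt &DemandedElts) {
    KnownBits Known(Elts[0].getBitWidth());
    Known.Zero.setAllBits();                         // start fully "known"
    Known.One.setAllBits();
    for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
      if (!DemandedElts[i])
        continue;                                    // skip undemanded elements
      Known = KnownBits::commonBits(Known, Elts[i]); // intersect known bits
      if (Known.isUnknown())
        break;                                       // nothing left to learn
    }
    return Known;
  }
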
This allows the urem_pow2_to_mask combine to fire in the pre-legalizer
combiner; it relies on the standard power-of-two remainder identity shown
below.
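
As an illustration of that identity (plain C++ for exposition; the function
names are hypothetical and not part of the patch):

  // x urem 2^k == x & (2^k - 1); with the 4096 used in the tests below:
  unsigned remPow2(unsigned X) { return X % 4096u; }  // G_UREM form
  unsigned remMask(unsigned X) { return X & 4095u; }  // combined G_AND form
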

Differential Revision: https://reviews.llvm.org/D96122

Added: 
    llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp

Modified: 
    llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
    llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
    llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 97aa094cfbc5..2d02917552b0 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -129,7 +129,7 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     return;
   }
 
-  unsigned BitWidth = DstTy.getSizeInBits();
+  unsigned BitWidth = DstTy.getScalarSizeInBits();
   auto CacheEntry = ComputeKnownBitsCache.find(R);
   if (CacheEntry != ComputeKnownBitsCache.end()) {
     Known = CacheEntry->second;
@@ -140,9 +140,6 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
   }
   Known = KnownBits(BitWidth); // Don't know anything
 
-  if (DstTy.isVector())
-    return; // TODO: Handle vectors.
-
   // Depth may get bigger than max depth if it gets passed to a different
   // GISelKnownBits object.
   // This may happen when say a generic part uses a GISelKnownBits object
@@ -164,6 +161,25 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
                                       Depth);
     break;
+  case TargetOpcode::G_BUILD_VECTOR: {
+    // Collect the known bits that are shared by every demanded vector element.
+    Known.Zero.setAllBits(); Known.One.setAllBits();
+    for (unsigned i = 0, e = MI.getNumOperands() - 1; i < e; ++i) {
+      if (!DemandedElts[i])
+        continue;
+
+      computeKnownBitsImpl(MI.getOperand(i + 1).getReg(), Known2, DemandedElts,
+                           Depth + 1);
+
+      // Known bits are the values that are shared by every demanded element.
+      Known = KnownBits::commonBits(Known, Known2);
+
+      // If we don't know any bits, early out.
+      if (Known.isUnknown())
+        break;
+    }
+    break;
+  }
   case TargetOpcode::COPY:
   case TargetOpcode::G_PHI:
   case TargetOpcode::PHI: {
@@ -244,6 +260,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     break;
   }
   case TargetOpcode::G_PTR_ADD: {
+    if (DstTy.isVector())
+      break;
     // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets?
     LLT Ty = MRI.getType(MI.getOperand(1).getReg());
     if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))
@@ -332,6 +350,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
   }
   case TargetOpcode::G_FCMP:
   case TargetOpcode::G_ICMP: {
+    if (DstTy.isVector())
+      break;
     if (TL.getBooleanContents(DstTy.isVector(),
                               Opcode == TargetOpcode::G_FCMP) ==
             TargetLowering::ZeroOrOneBooleanContent &&
@@ -369,6 +389,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     break;
   }
   case TargetOpcode::G_ZEXTLOAD: {
+    if (DstTy.isVector())
+      break;
     // Everything above the retrieved bits is zero
     Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
     break;
@@ -402,6 +424,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
   }
   case TargetOpcode::G_INTTOPTR:
   case TargetOpcode::G_PTRTOINT:
+    if (DstTy.isVector())
+      break;
     // Fall through and handle them the same as zext/trunc.
     LLVM_FALLTHROUGH;
   case TargetOpcode::G_ASSERT_ZEXT:
@@ -440,6 +464,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     break;
   }
   case TargetOpcode::G_UNMERGE_VALUES: {
+    if (DstTy.isVector())
+      break;
     unsigned NumOps = MI.getNumOperands();
     Register SrcReg = MI.getOperand(NumOps - 1).getReg();
     if (MRI.getType(SrcReg).isVector())

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
index da6c8480b25e..93b723c5e730 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
@@ -156,3 +156,74 @@ body:             |
     %rem:_(<2 x s16>) = G_UREM %var, %four_vec
     $vgpr0 = COPY %rem
 ...
+
+---
+name: v_urem_v2i32_pow2k_denom
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: v_urem_v2i32_pow2k_denom
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GCN: %pow2:_(s32) = G_CONSTANT i32 4096
+    ; GCN: %pow2_vec:_(<2 x s32>) = G_BUILD_VECTOR %pow2(s32), %pow2(s32)
+    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
+    ; GCN: [[ADD:%[0-9]+]]:_(<2 x s32>) = G_ADD %pow2_vec, [[BUILD_VECTOR]]
+    ; GCN: %rem:_(<2 x s32>) = G_AND %var, [[ADD]]
+    ; GCN: $vgpr0_vgpr1 = COPY %rem(<2 x s32>)
+    %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %pow2:_(s32) = G_CONSTANT i32 4096
+    %pow2_vec:_(<2 x s32>) = G_BUILD_VECTOR %pow2(s32), %pow2(s32)
+    %rem:_(<2 x s32>) = G_UREM %var, %pow2_vec
+    $vgpr0_vgpr1 = COPY %rem
+...
+
+---
+name: v_urem_v2i32_pow2k_not_splat_denom
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: v_urem_v2i32_pow2k_not_splat_denom
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GCN: %pow2_1:_(s32) = G_CONSTANT i32 4096
+    ; GCN: %pow2_2:_(s32) = G_CONSTANT i32 2048
+    ; GCN: %pow2_vec:_(<2 x s32>) = G_BUILD_VECTOR %pow2_1(s32), %pow2_2(s32)
+    ; GCN: %rem:_(<2 x s32>) = G_UREM %var, %pow2_vec
+    ; GCN: $vgpr0_vgpr1 = COPY %rem(<2 x s32>)
+    %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %pow2_1:_(s32) = G_CONSTANT i32 4096
+    %pow2_2:_(s32) = G_CONSTANT i32 2048
+    %pow2_vec:_(<2 x s32>) = G_BUILD_VECTOR %pow2_1(s32), %pow2_2(s32)
+    %rem:_(<2 x s32>) = G_UREM %var, %pow2_vec
+    $vgpr0_vgpr1 = COPY %rem
+...
+
+---
+name: v_urem_v2i64_pow2k_denom
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+
+    ; GCN-LABEL: name: v_urem_v2i64_pow2k_denom
+    ; GCN: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+    ; GCN: %var:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; GCN: %pow2:_(s64) = G_CONSTANT i64 4096
+    ; GCN: %pow2_vec:_(<2 x s64>) = G_BUILD_VECTOR %pow2(s64), %pow2(s64)
+    ; GCN: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; GCN: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD %pow2_vec, [[BUILD_VECTOR]]
+    ; GCN: %rem:_(<2 x s64>) = G_AND %var, [[ADD]]
+    ; GCN: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %rem(<2 x s64>)
+    %var:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %pow2:_(s64) = G_CONSTANT i64 4096
+    %pow2_vec:_(<2 x s64>) = G_BUILD_VECTOR %pow2(s64), %pow2(s64)
+    %rem:_(<2 x s64>) = G_UREM %var, %pow2_vec
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %rem
+...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
index e6bee5ee92f0..6d4ffa6db73b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
@@ -215,45 +215,13 @@ define i32 @v_urem_i32_pow2k_denom(i32 %num) {
 }
 
 define <2 x i32> @v_urem_v2i32_pow2k_denom(<2 x i32> %num) {
-; GISEL-LABEL: v_urem_v2i32_pow2k_denom:
-; GISEL:       ; %bb.0:
-; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT:    s_movk_i32 s4, 0x1000
-; GISEL-NEXT:    v_cvt_f32_u32_e32 v2, s4
-; GISEL-NEXT:    s_sub_i32 s5, 0, s4
-; GISEL-NEXT:    v_rcp_iflag_f32_e32 v2, v2
-; GISEL-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GISEL-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GISEL-NEXT:    v_mul_lo_u32 v3, s5, v2
-; GISEL-NEXT:    v_mul_hi_u32 v3, v2, v3
-; GISEL-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GISEL-NEXT:    v_mul_hi_u32 v3, v0, v2
-; GISEL-NEXT:    v_mul_hi_u32 v2, v1, v2
-; GISEL-NEXT:    v_lshlrev_b32_e32 v3, 12, v3
-; GISEL-NEXT:    v_lshlrev_b32_e32 v2, 12, v2
-; GISEL-NEXT:    v_sub_i32_e32 v0, vcc, v0, v3
-; GISEL-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
-; GISEL-NEXT:    v_subrev_i32_e32 v2, vcc, s4, v0
-; GISEL-NEXT:    v_subrev_i32_e32 v3, vcc, s4, v1
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
-; GISEL-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s4, v1
-; GISEL-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GISEL-NEXT:    v_subrev_i32_e32 v2, vcc, s4, v0
-; GISEL-NEXT:    v_subrev_i32_e32 v3, vcc, s4, v1
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
-; GISEL-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s4, v1
-; GISEL-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; CGP-LABEL: v_urem_v2i32_pow2k_denom:
-; CGP:       ; %bb.0:
-; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    s_add_i32 s4, 0x1000, -1
-; CGP-NEXT:    v_and_b32_e32 v0, s4, v0
-; CGP-NEXT:    v_and_b32_e32 v1, s4, v1
-; CGP-NEXT:    s_setpc_b64 s[30:31]
+; CHECK-LABEL: v_urem_v2i32_pow2k_denom:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_add_i32 s4, 0x1000, -1
+; CHECK-NEXT:    v_and_b32_e32 v0, s4, v0
+; CHECK-NEXT:    v_and_b32_e32 v1, s4, v1
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %result = urem <2 x i32> %num, <i32 4096, i32 4096>
   ret <2 x i32> %result
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 60084a08a1fb..6219bc0f19b8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -962,286 +962,25 @@ define i64 @v_urem_i64_pow2k_denom(i64 %num) {
 }
 
 define <2 x i64> @v_urem_v2i64_pow2k_denom(<2 x i64> %num) {
-; GISEL-LABEL: v_urem_v2i64_pow2k_denom:
-; GISEL:       ; %bb.0:
-; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT:    s_movk_i32 s10, 0x1000
-; GISEL-NEXT:    v_cvt_f32_u32_e32 v4, s10
-; GISEL-NEXT:    s_sub_u32 s8, 0, s10
-; GISEL-NEXT:    s_cselect_b32 s4, 1, 0
-; GISEL-NEXT:    v_cvt_f32_ubyte0_e32 v5, 0
-; GISEL-NEXT:    v_mov_b32_e32 v6, v4
-; GISEL-NEXT:    s_and_b32 s4, s4, 1
-; GISEL-NEXT:    v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT:    v_mac_f32_e32 v6, 0x4f800000, v5
-; GISEL-NEXT:    v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT:    v_rcp_iflag_f32_e32 v5, v6
-; GISEL-NEXT:    s_cmp_lg_u32 s4, 0
-; GISEL-NEXT:    s_subb_u32 s9, 0, 0
-; GISEL-NEXT:    v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT:    v_mul_f32_e32 v5, 0x5f7ffffc, v5
-; GISEL-NEXT:    v_mul_f32_e32 v6, 0x2f800000, v4
-; GISEL-NEXT:    s_sub_u32 s11, 0, s10
-; GISEL-NEXT:    s_cselect_b32 s4, 1, 0
-; GISEL-NEXT:    v_mul_f32_e32 v7, 0x2f800000, v5
-; GISEL-NEXT:    v_trunc_f32_e32 v6, v6
-; GISEL-NEXT:    s_and_b32 s4, s4, 1
-; GISEL-NEXT:    v_trunc_f32_e32 v7, v7
-; GISEL-NEXT:    v_mac_f32_e32 v4, 0xcf800000, v6
-; GISEL-NEXT:    v_cvt_u32_f32_e32 v6, v6
-; GISEL-NEXT:    v_mac_f32_e32 v5, 0xcf800000, v7
-; GISEL-NEXT:    v_cvt_u32_f32_e32 v7, v7
-; GISEL-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GISEL-NEXT:    s_cmp_lg_u32 s4, 0
-; GISEL-NEXT:    s_subb_u32 s6, 0, 0
-; GISEL-NEXT:    v_mul_lo_u32 v8, s11, v6
-; GISEL-NEXT:    v_cvt_u32_f32_e32 v5, v5
-; GISEL-NEXT:    v_mul_lo_u32 v9, s8, v7
-; GISEL-NEXT:    v_mul_lo_u32 v10, s11, v4
-; GISEL-NEXT:    v_mul_lo_u32 v11, s6, v4
-; GISEL-NEXT:    v_mul_hi_u32 v12, s11, v4
-; GISEL-NEXT:    v_mul_lo_u32 v13, s8, v5
-; GISEL-NEXT:    v_mul_lo_u32 v14, s9, v5
-; GISEL-NEXT:    v_mul_hi_u32 v15, s8, v5
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v11, v8
-; GISEL-NEXT:    v_mul_lo_u32 v11, v6, v10
-; GISEL-NEXT:    v_mul_hi_u32 v16, v4, v10
-; GISEL-NEXT:    v_mul_hi_u32 v10, v6, v10
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v14, v9
-; GISEL-NEXT:    v_mul_lo_u32 v14, v7, v13
-; GISEL-NEXT:    v_mul_hi_u32 v17, v5, v13
-; GISEL-NEXT:    v_mul_hi_u32 v13, v7, v13
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v9, v15
-; GISEL-NEXT:    v_mul_lo_u32 v12, v4, v8
-; GISEL-NEXT:    v_mul_lo_u32 v15, v6, v8
-; GISEL-NEXT:    v_mul_hi_u32 v18, v4, v8
-; GISEL-NEXT:    v_mul_hi_u32 v8, v6, v8
-; GISEL-NEXT:    v_mul_lo_u32 v19, v5, v9
-; GISEL-NEXT:    v_add_i32_e32 v14, vcc, v14, v19
-; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v14, vcc, v14, v17
-; GISEL-NEXT:    v_mul_lo_u32 v14, v7, v9
-; GISEL-NEXT:    v_mul_hi_u32 v17, v5, v9
-; GISEL-NEXT:    v_mul_hi_u32 v9, v7, v9
-; GISEL-NEXT:    v_add_i32_e64 v11, s[4:5], v11, v12
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v10, s[4:5], v15, v10
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v13, s[4:5], v14, v13
-; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v11, s[4:5], v11, v16
-; GISEL-NEXT:    v_cndmask_b32_e64 v11, 0, 1, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v10, s[4:5], v10, v18
-; GISEL-NEXT:    v_cndmask_b32_e64 v16, 0, 1, s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v13, v17
-; GISEL-NEXT:    v_cndmask_b32_e64 v17, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v11, vcc, v12, v11
-; GISEL-NEXT:    v_add_i32_e32 v12, vcc, v15, v16
-; GISEL-NEXT:    v_add_i32_e32 v15, vcc, v19, v18
-; GISEL-NEXT:    v_add_i32_e32 v14, vcc, v14, v17
-; GISEL-NEXT:    v_add_i32_e32 v10, vcc, v10, v11
-; GISEL-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v13, vcc, v13, v15
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v11, vcc, v12, v11
-; GISEL-NEXT:    v_add_i32_e32 v12, vcc, v14, v15
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v8, v11
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v9, v12
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v4, v10
-; GISEL-NEXT:    v_addc_u32_e64 v10, s[4:5], v6, v8, vcc
-; GISEL-NEXT:    v_add_i32_e64 v6, s[4:5], v6, v8
-; GISEL-NEXT:    v_mul_lo_u32 v8, s11, v4
-; GISEL-NEXT:    v_mul_lo_u32 v11, s6, v4
-; GISEL-NEXT:    v_mul_hi_u32 v12, s11, v4
-; GISEL-NEXT:    v_add_i32_e64 v5, s[4:5], v5, v13
-; GISEL-NEXT:    v_addc_u32_e64 v13, s[6:7], v7, v9, s[4:5]
-; GISEL-NEXT:    v_add_i32_e64 v7, s[6:7], v7, v9
-; GISEL-NEXT:    v_mul_lo_u32 v9, s8, v5
-; GISEL-NEXT:    v_mul_lo_u32 v14, s9, v5
-; GISEL-NEXT:    v_mul_hi_u32 v15, s8, v5
-; GISEL-NEXT:    v_mul_lo_u32 v16, s11, v10
-; GISEL-NEXT:    v_mul_lo_u32 v17, v10, v8
-; GISEL-NEXT:    v_mul_hi_u32 v18, v4, v8
-; GISEL-NEXT:    v_mul_hi_u32 v8, v10, v8
-; GISEL-NEXT:    v_mul_lo_u32 v19, s8, v13
-; GISEL-NEXT:    v_add_i32_e64 v11, s[6:7], v11, v16
-; GISEL-NEXT:    v_mul_lo_u32 v16, v13, v9
-; GISEL-NEXT:    v_add_i32_e64 v14, s[6:7], v14, v19
-; GISEL-NEXT:    v_mul_hi_u32 v19, v5, v9
-; GISEL-NEXT:    v_mul_hi_u32 v9, v13, v9
-; GISEL-NEXT:    v_add_i32_e64 v11, s[6:7], v11, v12
-; GISEL-NEXT:    v_add_i32_e64 v12, s[6:7], v14, v15
-; GISEL-NEXT:    v_mul_lo_u32 v14, v4, v11
-; GISEL-NEXT:    v_mul_lo_u32 v15, v5, v12
-; GISEL-NEXT:    v_add_i32_e64 v15, s[6:7], v16, v15
-; GISEL-NEXT:    v_cndmask_b32_e64 v16, 0, 1, s[6:7]
-; GISEL-NEXT:    v_add_i32_e64 v15, s[6:7], v15, v19
-; GISEL-NEXT:    v_mul_lo_u32 v15, v10, v11
-; GISEL-NEXT:    v_mul_hi_u32 v19, v4, v11
-; GISEL-NEXT:    v_mul_hi_u32 v10, v10, v11
-; GISEL-NEXT:    v_mul_lo_u32 v11, v13, v12
-; GISEL-NEXT:    v_mul_hi_u32 v13, v13, v12
-; GISEL-NEXT:    v_mul_hi_u32 v12, v5, v12
-; GISEL-NEXT:    v_add_i32_e64 v14, s[8:9], v17, v14
-; GISEL-NEXT:    v_cndmask_b32_e64 v17, 0, 1, s[8:9]
-; GISEL-NEXT:    v_add_i32_e64 v8, s[8:9], v15, v8
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, s[8:9]
-; GISEL-NEXT:    v_add_i32_e64 v9, s[8:9], v11, v9
-; GISEL-NEXT:    v_cndmask_b32_e64 v11, 0, 1, s[8:9]
-; GISEL-NEXT:    v_add_i32_e64 v14, s[8:9], v14, v18
-; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, s[8:9]
-; GISEL-NEXT:    v_add_i32_e64 v8, s[8:9], v8, v19
-; GISEL-NEXT:    v_cndmask_b32_e64 v18, 0, 1, s[8:9]
-; GISEL-NEXT:    v_cndmask_b32_e64 v19, 0, 1, s[6:7]
-; GISEL-NEXT:    v_add_i32_e64 v9, s[6:7], v9, v12
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[6:7]
-; GISEL-NEXT:    v_add_i32_e64 v14, s[6:7], v17, v14
-; GISEL-NEXT:    v_add_i32_e64 v15, s[6:7], v15, v18
-; GISEL-NEXT:    v_add_i32_e64 v16, s[6:7], v16, v19
-; GISEL-NEXT:    v_add_i32_e64 v11, s[6:7], v11, v12
-; GISEL-NEXT:    v_add_i32_e64 v8, s[6:7], v8, v14
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, 0, 1, s[6:7]
-; GISEL-NEXT:    v_add_i32_e64 v9, s[6:7], v9, v16
-; GISEL-NEXT:    v_cndmask_b32_e64 v14, 0, 1, s[6:7]
-; GISEL-NEXT:    v_add_i32_e64 v12, s[6:7], v15, v12
-; GISEL-NEXT:    v_add_i32_e64 v11, s[6:7], v11, v14
-; GISEL-NEXT:    v_add_i32_e64 v10, s[6:7], v10, v12
-; GISEL-NEXT:    v_add_i32_e64 v11, s[6:7], v13, v11
-; GISEL-NEXT:    v_addc_u32_e32 v6, vcc, v6, v10, vcc
-; GISEL-NEXT:    v_addc_u32_e64 v7, vcc, v7, v11, s[4:5]
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; GISEL-NEXT:    v_mul_lo_u32 v8, v3, v4
-; GISEL-NEXT:    v_mul_hi_u32 v10, v2, v4
-; GISEL-NEXT:    v_mul_hi_u32 v4, v3, v4
-; GISEL-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
-; GISEL-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GISEL-NEXT:    v_mul_lo_u32 v9, v1, v5
-; GISEL-NEXT:    v_mul_hi_u32 v11, v0, v5
-; GISEL-NEXT:    v_mul_hi_u32 v5, v1, v5
-; GISEL-NEXT:    v_mul_lo_u32 v12, v2, v6
-; GISEL-NEXT:    v_mul_lo_u32 v13, v3, v6
-; GISEL-NEXT:    v_mul_hi_u32 v14, v2, v6
-; GISEL-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GISEL-NEXT:    v_mul_lo_u32 v15, v0, v7
-; GISEL-NEXT:    v_mul_lo_u32 v16, v1, v7
-; GISEL-NEXT:    v_mul_hi_u32 v17, v0, v7
-; GISEL-NEXT:    v_mul_hi_u32 v7, v1, v7
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v13, v4
-; GISEL-NEXT:    v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v9, v15
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v5, vcc, v16, v5
-; GISEL-NEXT:    v_cndmask_b32_e64 v16, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
-; GISEL-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v4, v14
-; GISEL-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v9, v11
-; GISEL-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v5, vcc, v5, v17
-; GISEL-NEXT:    v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v12, v8
-; GISEL-NEXT:    v_add_i32_e32 v10, vcc, v13, v10
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v15, v9
-; GISEL-NEXT:    v_add_i32_e32 v11, vcc, v16, v11
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v4, v8
-; GISEL-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
-; GISEL-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT:    v_add_i32_e32 v8, vcc, v10, v8
-; GISEL-NEXT:    v_mul_lo_u32 v10, s10, v4
-; GISEL-NEXT:    v_mul_lo_u32 v12, 0, v4
-; GISEL-NEXT:    v_mul_hi_u32 v4, s10, v4
-; GISEL-NEXT:    v_add_i32_e32 v9, vcc, v11, v9
-; GISEL-NEXT:    v_mul_lo_u32 v11, s10, v5
-; GISEL-NEXT:    v_mul_lo_u32 v13, 0, v5
-; GISEL-NEXT:    v_mul_hi_u32 v5, s10, v5
-; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v6, v8
-; GISEL-NEXT:    v_add_i32_e32 v7, vcc, v7, v9
-; GISEL-NEXT:    v_mul_lo_u32 v6, s10, v6
-; GISEL-NEXT:    v_mul_lo_u32 v7, s10, v7
-; GISEL-NEXT:    v_add_i32_e32 v6, vcc, v12, v6
-; GISEL-NEXT:    v_add_i32_e32 v7, vcc, v13, v7
-; GISEL-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GISEL-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; GISEL-NEXT:    v_sub_i32_e32 v2, vcc, v2, v10
-; GISEL-NEXT:    v_subb_u32_e64 v6, s[4:5], v3, v4, vcc
-; GISEL-NEXT:    v_sub_i32_e64 v3, s[4:5], v3, v4
-; GISEL-NEXT:    v_cmp_le_u32_e64 s[4:5], s10, v2
-; GISEL-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; GISEL-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v11
-; GISEL-NEXT:    v_subb_u32_e64 v7, s[6:7], v1, v5, s[4:5]
-; GISEL-NEXT:    v_sub_i32_e64 v1, s[6:7], v1, v5
-; GISEL-NEXT:    v_cmp_le_u32_e64 s[6:7], s10, v0
-; GISEL-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[6:7]
-; GISEL-NEXT:    v_cmp_le_u32_e64 s[6:7], 0, v6
-; GISEL-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[6:7]
-; GISEL-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 0, v7
-; GISEL-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
-; GISEL-NEXT:    v_subbrev_u32_e64 v1, vcc, 0, v1, s[4:5]
-; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
-; GISEL-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
-; GISEL-NEXT:    v_subrev_i32_e32 v8, vcc, s10, v2
-; GISEL-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s10, v8
-; GISEL-NEXT:    v_cndmask_b32_e64 v10, 0, -1, vcc
-; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v7
-; GISEL-NEXT:    v_cndmask_b32_e32 v5, v9, v5, vcc
-; GISEL-NEXT:    v_subrev_i32_e32 v9, vcc, s10, v0
-; GISEL-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, s10, v9
-; GISEL-NEXT:    v_cndmask_b32_e64 v11, 0, -1, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 0, v3
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, 0, -1, vcc
-; GISEL-NEXT:    v_subrev_i32_e32 v13, vcc, s10, v8
-; GISEL-NEXT:    v_subbrev_u32_e32 v14, vcc, 0, v3, vcc
-; GISEL-NEXT:    v_cmp_le_u32_e32 vcc, 0, v1
-; GISEL-NEXT:    v_cndmask_b32_e64 v15, 0, -1, vcc
-; GISEL-NEXT:    v_subrev_i32_e32 v16, vcc, s10, v9
-; GISEL-NEXT:    v_subbrev_u32_e32 v17, vcc, 0, v1, vcc
-; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GISEL-NEXT:    v_cndmask_b32_e32 v10, v12, v10, vcc
-; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; GISEL-NEXT:    v_cndmask_b32_e32 v11, v15, v11, vcc
-; GISEL-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v10
-; GISEL-NEXT:    v_cndmask_b32_e32 v8, v8, v13, vcc
-; GISEL-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v11
-; GISEL-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v3, v14, vcc
-; GISEL-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GISEL-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v1, v17, s[4:5]
-; GISEL-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v5
-; GISEL-NEXT:    v_cndmask_b32_e64 v0, v0, v9, s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e64 v1, v7, v1, s[4:5]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v6, v3, vcc
-; GISEL-NEXT:    s_setpc_b64 s[30:31]
-;
-; CGP-LABEL: v_urem_v2i64_pow2k_denom:
-; CGP:       ; %bb.0:
-; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    s_movk_i32 s4, 0x1000
-; CGP-NEXT:    s_add_u32 s5, s4, -1
-; CGP-NEXT:    s_cselect_b32 s6, 1, 0
-; CGP-NEXT:    s_and_b32 s6, s6, 1
-; CGP-NEXT:    s_cmp_lg_u32 s6, 0
-; CGP-NEXT:    s_addc_u32 s6, 0, -1
-; CGP-NEXT:    s_add_u32 s4, s4, -1
-; CGP-NEXT:    s_cselect_b32 s7, 1, 0
-; CGP-NEXT:    v_and_b32_e32 v0, s5, v0
-; CGP-NEXT:    s_and_b32 s5, s7, 1
-; CGP-NEXT:    v_and_b32_e32 v1, s6, v1
-; CGP-NEXT:    s_cmp_lg_u32 s5, 0
-; CGP-NEXT:    s_addc_u32 s5, 0, -1
-; CGP-NEXT:    v_and_b32_e32 v2, s4, v2
-; CGP-NEXT:    v_and_b32_e32 v3, s5, v3
-; CGP-NEXT:    s_setpc_b64 s[30:31]
+; CHECK-LABEL: v_urem_v2i64_pow2k_denom:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_movk_i32 s4, 0x1000
+; CHECK-NEXT:    s_add_u32 s5, s4, -1
+; CHECK-NEXT:    s_cselect_b32 s6, 1, 0
+; CHECK-NEXT:    s_and_b32 s6, s6, 1
+; CHECK-NEXT:    s_cmp_lg_u32 s6, 0
+; CHECK-NEXT:    s_addc_u32 s6, 0, -1
+; CHECK-NEXT:    s_add_u32 s4, s4, -1
+; CHECK-NEXT:    s_cselect_b32 s7, 1, 0
+; CHECK-NEXT:    v_and_b32_e32 v0, s5, v0
+; CHECK-NEXT:    s_and_b32 s5, s7, 1
+; CHECK-NEXT:    v_and_b32_e32 v1, s6, v1
+; CHECK-NEXT:    s_cmp_lg_u32 s5, 0
+; CHECK-NEXT:    s_addc_u32 s5, 0, -1
+; CHECK-NEXT:    v_and_b32_e32 v2, s4, v2
+; CHECK-NEXT:    v_and_b32_e32 v3, s5, v3
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %result = urem <2 x i64> %num, <i64 4096, i64 4096>
   ret <2 x i64> %result
 }

diff --git a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
index 4b278217f738..5d43e4c8f88c 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/GlobalISel/CMakeLists.txt
@@ -20,5 +20,6 @@ add_llvm_unittest(GlobalISelTests
   GISelMITest.cpp
   PatternMatchTest.cpp
   KnownBitsTest.cpp
+  KnownBitsVectorTest.cpp
   GISelUtilsTest.cpp
   )

diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
index eacad3469e5e..22e21600df05 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -259,7 +259,106 @@ TEST_F(AArch64GISelMITest, TestKnownBitsPtrToIntViceVersa) {
   EXPECT_EQ(256u, Res.One.getZExtValue());
   EXPECT_EQ(0xfffffeffu, Res.Zero.getZExtValue());
 }
+
+TEST_F(AArch64GISelMITest, TestKnownBitsAND) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %and:_(s8) = G_AND %val0, %val1
+   %copy_and:_(s8) = COPY %and
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  //   00??1?10
+  // & 00?11000
+  // = 00??1000
+  EXPECT_EQ(0x08u, Res.One.getZExtValue());
+  EXPECT_EQ(0xC7u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsOR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %or:_(s8) = G_OR %val0, %val1
+   %copy_or:_(s8) = COPY %or
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  //   00??1?10
+  // | 00?11000
+  // = 00?11?10
+  EXPECT_EQ(0x1Au, Res.One.getZExtValue());
+  EXPECT_EQ(0xC1u, Res.Zero.getZExtValue());
+}
+
 TEST_F(AArch64GISelMITest, TestKnownBitsXOR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %xor:_(s8) = G_XOR %val0, %val1
+   %copy_xor:_(s8) = COPY %xor
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Xor KnownBits does not track whether an unknown bit is xored with itself
+  // (always 0) or with its own negation (always 1).
+  //   00??1?10
+  // ^ 00?11000
+  // = 00??0?10
+  EXPECT_EQ(0x02u, Res.One.getZExtValue());
+  EXPECT_EQ(0xC9u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsXORConstant) {
   StringRef MIRString = "  %3:_(s8) = G_CONSTANT i8 4\n"
                         "  %4:_(s8) = G_CONSTANT i8 7\n"
                         "  %5:_(s8) = G_XOR %3, %4\n"
@@ -276,6 +375,299 @@ TEST_F(AArch64GISelMITest, TestKnownBitsXOR) {
   EXPECT_EQ(252u, Res.Zero.getZExtValue());
 }
 
+TEST_F(AArch64GISelMITest, TestKnownBitsASHR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 38
+   %mask1:_(s8) = G_CONSTANT i8 202
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %cst0:_(s8) = G_CONSTANT i8 2
+   %ashr0:_(s8) = G_ASHR %val0, %cst0
+   %copy_ashr0:_(s8) = COPY %ashr0
+
+   %mask2:_(s8) = G_CONSTANT i8 204
+   %mask3:_(s8) = G_CONSTANT i8 18
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %ashr1:_(s8) = G_ASHR %val1, %cst0
+   %copy_ashr1:_(s8) = COPY %ashr1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  //   11?01??0 >> 2
+  // = 1111?01?
+  EXPECT_EQ(0xF2u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x04u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
+  //   ??01??10 >> 2
+  // = ????01??
+  EXPECT_EQ(0x04u, Res1.One.getZExtValue());
+  EXPECT_EQ(0x08u, Res1.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsLSHR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 38
+   %mask1:_(s8) = G_CONSTANT i8 202
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %cst0:_(s8) = G_CONSTANT i8 2
+   %lshr0:_(s8) = G_LSHR %val0, %cst0
+   %copy_lshr0:_(s8) = COPY %lshr0
+
+   %mask2:_(s8) = G_CONSTANT i8 204
+   %mask3:_(s8) = G_CONSTANT i8 18
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %lshr1:_(s8) = G_LSHR %val1, %cst0
+   %copy_lshr1:_(s8) = COPY %lshr1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  //   11?01??0 >> 2
+  // = 0011?01?
+  EXPECT_EQ(0x32u, Res0.One.getZExtValue());
+  EXPECT_EQ(0xC4u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
+  //   ??01??10 >> 2
+  // = 00??01??
+  EXPECT_EQ(0x04u, Res1.One.getZExtValue());
+  EXPECT_EQ(0xC8u, Res1.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsSHL) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 51
+   %mask1:_(s8) = G_CONSTANT i8 72
+   %tmp:_(s8) = G_AND %unknown, %mask0
+   %val:_(s8) = G_OR %tmp, %mask1
+   %cst:_(s8) = G_CONSTANT i8 3
+   %shl:_(s8) = G_SHL %val, %cst
+   %copy_shl:_(s8) = COPY %shl
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  //   01??10?? << 3
+  // = ?10??000
+  EXPECT_EQ(0x40u, Res.One.getZExtValue());
+  EXPECT_EQ(0x27u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsADD) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s16) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s16) = G_CONSTANT i16 4642
+   %mask1:_(s16) = G_CONSTANT i16 9536
+   %tmp0:_(s16) = G_AND %unknown, %mask0
+   %val0:_(s16) = G_OR %tmp0, %mask1
+   %mask2:_(s16) = G_CONSTANT i16 4096
+   %mask3:_(s16) = G_CONSTANT i16 371
+   %tmp1:_(s16) = G_AND %unknown, %mask2
+   %val1:_(s16) = G_OR %tmp1, %mask3
+   %add:_(s16) = G_ADD %val0, %val1
+   %copy_add:_(s16) = COPY %add
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Add KnownBits works out the known carry bits first, then computes the result.
+  //   001?01?101?000?0
+  // + 000?000101110011
+  // = 0??????01??10??1
+  EXPECT_EQ(0x0091u, Res.One.getZExtValue());
+  EXPECT_EQ(0x8108u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsSUB) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s16) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s16) = G_CONSTANT i16 4642
+   %mask1:_(s16) = G_CONSTANT i16 9536
+   %tmp0:_(s16) = G_AND %unknown, %mask0
+   %val0:_(s16) = G_OR %tmp0, %mask1
+   %mask2:_(s16) = G_CONSTANT i16 4096
+   %mask3:_(s16) = G_CONSTANT i16 371
+   %tmp1:_(s16) = G_AND %unknown, %mask2
+   %val1:_(s16) = G_OR %tmp1, %mask3
+   %sub:_(s16) = G_SUB %val0, %val1
+   %copy_sub:_(s16) = COPY %sub
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Sub KnownBits for LHS - RHS uses Add KnownBits for LHS + ~RHS + 1.
+  EXPECT_EQ(0x01CDu, Res.One.getZExtValue());
+  EXPECT_EQ(0xC810u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsMUL) {
+  StringRef MIRString = R"(
+   %ptr0:_(p0) = G_IMPLICIT_DEF
+   %load0:_(s16) = G_LOAD %ptr0(p0) :: (load 2)
+   %mask0:_(s16) = G_CONSTANT i16 4
+   %mask1:_(s16) = G_CONSTANT i16 18
+   %tmp:_(s16) = G_AND %load0, %mask0
+   %val0:_(s16) = G_OR %tmp, %mask1
+   %cst:_(s16) = G_CONSTANT i16 12
+   %mul:_(s16) = G_MUL %val0, %cst
+   %copy_mul:_(s16) = COPY %mul
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Mul KnownBits is conservatively correct, but not guaranteed to be precise;
+  // it is precise for the trailing bits up to the first unknown bit.
+  // 00010?10 * 00001100 =
+  //          00010?1000
+  //  +      00010?10000
+  //  = 0000000010??1000
+  // KB 0000000?????1000
+  EXPECT_EQ(0x0008u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFE07u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsICMP) {
+  StringRef MIRString = R"(
+   %cst0:_(s32) = G_CONSTANT i32 0
+   %cst1:_(s32) = G_CONSTANT i32 1
+   %icmp:_(s32) = G_ICMP intpred(ne), %cst0, %cst1
+   %copy_icmp:_(s32) = COPY %icmp
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Targets that use 0 or 1 as the icmp result in a larger register set the
+  // high bits to 0; the operands and compare predicate are not analyzed.
+  EXPECT_EQ(0x00000000u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFFFFFFFEu, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsFCMP) {
+  StringRef MIRString = R"(
+   %cst0:_(s32) = G_FCONSTANT float 0.0
+   %cst1:_(s32) = G_FCONSTANT float 1.0
+   %fcmp:_(s32) = G_FCMP floatpred(one), %cst0, %cst1
+   %copy_fcmp:_(s32) = COPY %fcmp
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Targets that use 0 or 1 as the fcmp result in a larger register set the
+  // high bits to 0; the operands and compare predicate are not analyzed.
+  EXPECT_EQ(0x00000000u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFFFFFFFEu, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsSelect) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 24
+   %mask1:_(s8) = G_CONSTANT i8 224
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 146
+   %mask3:_(s8) = G_CONSTANT i8 36
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %cond:_(s1) = G_CONSTANT i1 false
+   %select:_(s8) = G_SELECT %cond, %val0, %val1
+   %copy_select:_(s8) = COPY %select
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // Select KnownBits takes the bits common to LHS and RHS; the condition
+  // operand is not analyzed.
+  //        111??000
+  // select ?01?01?0
+  //      = ??1????0
+  EXPECT_EQ(0x20u, Res.One.getZExtValue());
+  EXPECT_EQ(0x01u, Res.Zero.getZExtValue());
+}
+
 TEST_F(AArch64GISelMITest, TestKnownBits) {
 
   StringRef MIR = "  %3:_(s32) = G_TRUNC %0\n"
@@ -995,6 +1387,63 @@ TEST_F(AArch64GISelMITest, TestKnownBitsBSwapBitReverse) {
   EXPECT_EQ(~TestVal, BitReverseKnown.Zero.getZExtValue());
 }
 
+TEST_F(AArch64GISelMITest, TestKnownBitsUMAX) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 10
+   %mask1:_(s8) = G_CONSTANT i8 1
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 3
+   %mask3:_(s8) = G_CONSTANT i8 12
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %umax0:_(s8) = G_UMAX %val0, %val1
+   %copy_umax0:_(s8) = COPY %umax0
+
+   %mask4:_(s8) = G_CONSTANT i8 14
+   %mask5:_(s8) = G_CONSTANT i8 2
+   %tmp3:_(s8) = G_AND %unknown, %mask4
+   %val3:_(s8) = G_OR %tmp3, %mask5
+   %mask6:_(s8) = G_CONSTANT i8 4
+   %mask7:_(s8) = G_CONSTANT i8 11
+   %tmp4:_(s8) = G_AND %unknown, %mask6
+   %val4:_(s8) = G_OR %tmp4, %mask7
+   %umax1:_(s8) = G_UMAX %val3, %val4
+   %copy_umax1:_(s8) = COPY %umax1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  // Compares min/max of LHS and RHS: min assumes 0 for unknown bits, max
+  // assumes 1. If min(LHS) >= max(RHS) this returns the KnownBits of LHS,
+  // and similarly for RHS. If that fails, it falls back to computing
+  // individual bits: the common bits, plus a few leading bits in some cases.
+  //      0000?0?1
+  // umax 000011??
+  //    = 000011??
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  EXPECT_EQ(0x0Cu, Res0.One.getZExtValue());
+  EXPECT_EQ(0xF0u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
+  //      0000??10
+  // umax 00001?11
+  //    = 00001?1?
+  EXPECT_EQ(0x0Au, Res1.One.getZExtValue());
+  EXPECT_EQ(0xF0u, Res1.Zero.getZExtValue());
+}
+
 TEST_F(AArch64GISelMITest, TestKnownBitsUMax) {
   StringRef MIRString = R"(
    %val:_(s32) = COPY $w0
@@ -1019,6 +1468,110 @@ TEST_F(AArch64GISelMITest, TestKnownBitsUMax) {
   EXPECT_EQ(0xffffffffffffff00, KnownUmax.One.getZExtValue());
 }
 
+TEST_F(AArch64GISelMITest, TestKnownBitsUMIN) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 10
+   %mask1:_(s8) = G_CONSTANT i8 1
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 3
+   %mask3:_(s8) = G_CONSTANT i8 12
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %umin:_(s8) = G_UMIN %val0, %val1
+   %copy_umin:_(s8) = COPY %umin
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  // umin flips the range of the operands ([0, 0xFFFFFFFF] <-> [0xFFFFFFFF, 0]),
+  // uses umax, and flips the result back.
+  //      0000?0?1
+  // umin 000011??
+  //    = 0000?0?1
+  EXPECT_EQ(0x01u, Res0.One.getZExtValue());
+  EXPECT_EQ(0xF4u, Res0.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsSMAX) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 128
+   %mask1:_(s8) = G_CONSTANT i8 64
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 1
+   %mask3:_(s8) = G_CONSTANT i8 128
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %smax:_(s8) = G_SMAX %val0, %val1
+   %copy_smax:_(s8) = COPY %smax
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  // smax flips the range of the operands ([-0x80000000, 0x7FFFFFFF] <->
+  // [0, 0xFFFFFFFF]), uses umax, and flips the result back. Here RHS is
+  // negative and LHS is either positive or negative with a smaller abs value.
+  //      ?1000000
+  // smax 1000000?
+  //    = ?1000000
+  EXPECT_EQ(0x40u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x3Fu, Res0.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsSMIN) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 128
+   %mask1:_(s8) = G_CONSTANT i8 64
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 1
+   %mask3:_(s8) = G_CONSTANT i8 128
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %smin:_(s8) = G_SMIN %val0, %val1
+   %copy_smin:_(s8) = COPY %smin
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
+  // smin flips the range of the operands ([-0x80000000, 0x7FFFFFFF] <->
+  // [0xFFFFFFFF, 0]), uses umax, and flips the result back. Here RHS is
+  // negative and LHS is either positive or negative with a smaller abs value.
+  //      ?1000000
+  // smin 1000000?
+  //    = 1000000?
+  EXPECT_EQ(0x80u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x7Eu, Res0.Zero.getZExtValue());
+}
+
 TEST_F(AArch64GISelMITest, TestInvalidQueries) {
   StringRef MIRString = R"(
    %src:_(s32) = COPY $w0

diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
new file mode 100644
index 000000000000..9cb8a365ae99
--- /dev/null
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -0,0 +1,1527 @@
+//===- KnownBitsVectorTest.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GISelMITest.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+
+TEST_F(AArch64GISelMITest, TestKnownBitsBuildVector) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(s8) = G_LOAD %ptr(p0) :: (load 1)
+   %mask0:_(s8) = G_CONSTANT i8 24
+   %mask1:_(s8) = G_CONSTANT i8 224
+   %tmp0:_(s8) = G_AND %unknown, %mask0
+   %val0:_(s8) = G_OR %tmp0, %mask1
+   %mask2:_(s8) = G_CONSTANT i8 146
+   %mask3:_(s8) = G_CONSTANT i8 36
+   %tmp1:_(s8) = G_AND %unknown, %mask2
+   %val1:_(s8) = G_OR %tmp1, %mask3
+   %vector:_(<2 x s8>) = G_BUILD_VECTOR %val0, %val1
+   %copy_vector:_(<2 x s8>) = COPY %vector
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  // BuildVector KnownBits takes the bits common to all elements.
+  //        111??000
+  // common ?01?01?0
+  //      = ??1????0
+  EXPECT_EQ(0x20u, Res.One.getZExtValue());
+  EXPECT_EQ(0x01u, Res.Zero.getZExtValue());
+}
+
+// Vector KnownBits tracks the bits that are common to all vector elements.
+// For the tests below the analysis is the same as for scalar/pointer types;
+// the tests are mostly copied from KnownBitsTest.cpp, rewritten to use splat
+// vectors, and have the same results.
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHI) {
+  StringRef MIRString = R"(
+   bb.10:
+   %10:_(s8) = G_CONSTANT i8 3
+   %11:_(<2 x s8>) = G_BUILD_VECTOR %10:_(s8), %10:_(s8)
+   %12:_(s1) = G_IMPLICIT_DEF
+   G_BRCOND %12(s1), %bb.11
+   G_BR %bb.12
+
+   bb.11:
+   %13:_(s8) = G_CONSTANT i8 2
+   %14:_(<2 x s8>) = G_BUILD_VECTOR %13:_(s8), %13:_(s8)
+   G_BR %bb.12
+
+   bb.12:
+   %15:_(<2 x s8>) = PHI %11(<2 x s8>), %bb.10, %14(<2 x s8>), %bb.11
+   %16:_(<2 x s8>) = COPY %15
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  Register DstReg = FinalCopy->getOperand(0).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)2, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0xfc, Res.Zero.getZExtValue());
+
+  KnownBits Res2 = Info.getKnownBits(DstReg);
+  EXPECT_EQ(Res.One.getZExtValue(), Res2.One.getZExtValue());
+  EXPECT_EQ(Res.Zero.getZExtValue(), Res2.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHIToNonGenericReg) {
+  StringRef MIRString = R"(
+   bb.10:
+   %10:gpr32 = MOVi32imm 771
+   %11:_(s1) = G_IMPLICIT_DEF
+   G_BRCOND %11(s1), %bb.11
+   G_BR %bb.12
+
+   bb.11:
+   %12:_(s16) = G_CONSTANT i16 2
+   %13:_(<2 x s16>) = G_BUILD_VECTOR %12:_(s16), %12:_(s16)
+   G_BR %bb.12
+
+   bb.12:
+   %15:_(<2 x s16>) = PHI %10, %bb.10, %13(<2 x s16>), %bb.11
+   %16:_(<2 x s16>) = COPY %15
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  Register DstReg = FinalCopy->getOperand(0).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
+
+  KnownBits Res2 = Info.getKnownBits(DstReg);
+  EXPECT_EQ(Res.One.getZExtValue(), Res2.One.getZExtValue());
+  EXPECT_EQ(Res.Zero.getZExtValue(), Res2.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorUnknownPHI) {
+  StringRef MIRString = R"(
+   bb.10:
+   %10:_(<2 x s32>) = G_BITCAST %0
+   %11:_(s1) = G_IMPLICIT_DEF
+   G_BRCOND %11(s1), %bb.11
+   G_BR %bb.12
+
+   bb.11:
+   %12:_(s32) = G_CONSTANT i32 2
+   %13:_(<2 x s32>) = G_BUILD_VECTOR %12:_(s32), %12:_(s32)
+   G_BR %bb.12
+
+   bb.12:
+   %14:_(<2 x s32>) = PHI %10(<2 x s32>), %bb.10, %13(<2 x s32>), %bb.11
+   %15:_(<2 x s32>) = COPY %14
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  Register DstReg = FinalCopy->getOperand(0).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
+
+  KnownBits Res2 = Info.getKnownBits(DstReg);
+  EXPECT_EQ(Res.One.getZExtValue(), Res2.One.getZExtValue());
+  EXPECT_EQ(Res.Zero.getZExtValue(), Res2.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorCstPHIWithLoop) {
+  StringRef MIRString = R"(
+   bb.10:
+   %10:_(s8) = G_CONSTANT i8 3
+   %11:_(<2 x s8>) = G_BUILD_VECTOR %10:_(s8), %10:_(s8)
+   %12:_(s1) = G_IMPLICIT_DEF
+   G_BRCOND %12(s1), %bb.11
+   G_BR %bb.12
+
+   bb.11:
+   %13:_(s8) = G_CONSTANT i8 2
+   %14:_(<2 x s8>) = G_BUILD_VECTOR %13:_(s8), %13:_(s8)
+   G_BR %bb.12
+
+   bb.12:
+   %15:_(<2 x s8>) = PHI %11(<2 x s8>), %bb.10, %14(<2 x s8>), %bb.11, %16(<2 x s8>), %bb.12
+   %16:_(<2 x s8>) = COPY %15
+   G_BR %bb.12
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  Register DstReg = FinalCopy->getOperand(0).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0, Res.Zero.getZExtValue());
+
+  KnownBits Res2 = Info.getKnownBits(DstReg);
+  EXPECT_EQ(Res.One.getZExtValue(), Res2.One.getZExtValue());
+  EXPECT_EQ(Res.Zero.getZExtValue(), Res2.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorDecreasingCstPHIWithLoop) {
+  StringRef MIRString = R"(
+   bb.10:
+   %10:_(s8) = G_CONSTANT i8 5
+   %11:_(<2 x s8>) = G_BUILD_VECTOR %10:_(s8), %10:_(s8)
+   %12:_(s8) = G_CONSTANT i8 1
+
+   bb.12:
+   %13:_(<2 x s8>) = PHI %11(<2 x s8>), %bb.10, %14(<2 x s8>), %bb.12
+   %14:_(<2 x s8>) = G_LSHR %13, %12
+   %15:_(<2 x s8>) = COPY %14
+   G_BR %bb.12
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  Register DstReg = FinalCopy->getOperand(0).getReg();
+  GISelKnownBits Info(*MF, /*MaxDepth=*/24);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0xC0, Res.Zero.getZExtValue());
+
+  KnownBits Res2 = Info.getKnownBits(DstReg);
+  EXPECT_EQ(Res.One.getZExtValue(), Res2.One.getZExtValue());
+  EXPECT_EQ(Res.Zero.getZExtValue(), Res2.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorAND) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %and:_(<2 x s8>) = G_AND %val0, %val1
+   %copy_and:_(<2 x s8>) = COPY %and
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(0x08u, Res.One.getZExtValue());
+  EXPECT_EQ(0xC7u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorOR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %or:_(<2 x s8>) = G_OR %val0, %val1
+   %copy_or:_(<2 x s8>) = COPY %or
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(0x1Au, Res.One.getZExtValue());
+  EXPECT_EQ(0xC1u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorXOR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 52
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 10
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 32
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 24
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %xor:_(<2 x s8>) = G_XOR %val0, %val1
+   %copy_xor:_(<2 x s8>) = COPY %xor
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(0x02u, Res.One.getZExtValue());
+  EXPECT_EQ(0xC9u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorXORConstant) {
+  StringRef MIRString = R"(
+   %3:_(s8) = G_CONSTANT i8 4
+   %4:_(<2 x s8>) = G_BUILD_VECTOR %3:_(s8), %3:_(s8)
+   %5:_(s8) = G_CONSTANT i8 7
+   %6:_(<2 x s8>) = G_BUILD_VECTOR %5:_(s8), %5:_(s8)
+   %7:_(<2 x s8>) = G_XOR %4, %6
+   %8:_(<2 x s8>) = COPY %7
+
+   %9:_(s8) = G_CONSTANT i8 12
+   %10:_(<2 x s8>) = G_BUILD_VECTOR %3:_(s8), %9:_(s8)
+   %11:_(<2 x s8>) = G_XOR %10, %6
+   %12:_(<2 x s8>) = COPY %11
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  GISelKnownBits Info(*MF);
+  Register CopySplatReg = Copies[Copies.size() - 2];
+  MachineInstr *FinalSplatCopy = MRI->getVRegDef(CopySplatReg);
+  Register SrcSplatReg = FinalSplatCopy->getOperand(1).getReg();
+  KnownBits ResSplat = Info.getKnownBits(SrcSplatReg);
+  // Splat: <4, 4> ^ <7, 7> = <3, 3>, so every bit is known.
+  EXPECT_EQ(3u, ResSplat.One.getZExtValue());
+  EXPECT_EQ(252u, ResSplat.Zero.getZExtValue());
+
+  Register CopyNonSplatReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalNonSplatCopy = MRI->getVRegDef(CopyNonSplatReg);
+  Register SrcNonSplatReg = FinalNonSplatCopy->getOperand(1).getReg();
+  KnownBits ResNonSplat = Info.getKnownBits(SrcNonSplatReg);
+  // Non-splat: <4, 12> ^ <7, 7> = <3, 11>; only the bits common to both
+  // elements are known (one: 0b00000011, zero: 0b11110100).
+  EXPECT_EQ(3u, ResNonSplat.One.getZExtValue());
+  EXPECT_EQ(244u, ResNonSplat.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorASHR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 38
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 202
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %cst0:_(s8) = G_CONSTANT i8 2
+   %cst0_splat:_(<2 x s8>) = G_BUILD_VECTOR %cst0, %cst0
+   %ashr0:_(<2 x s8>) = G_ASHR %val0, %cst0_splat
+   %copy_ashr0:_(<2 x s8>) = COPY %ashr0
+
+   %mask2:_(s8) = G_CONSTANT i8 204
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 18
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %ashr1:_(<2 x s8>) = G_ASHR %val1, %cst0_splat
+   %copy_ashr1:_(<2 x s8>) = COPY %ashr1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
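+  // val0 has One=0xCA/Zero=0x11; the sign bit is known one, so the ASHR
+  // shifts in ones: One=0xF2, Zero=0x04.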
+  EXPECT_EQ(0xF2u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x04u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
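+  // val1 has One=0x12/Zero=0x21; the sign bit is unknown, so the two
+  // shifted-in bits stay unknown: One=0x04, Zero=0x08.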
+  EXPECT_EQ(0x04u, Res1.One.getZExtValue());
+  EXPECT_EQ(0x08u, Res1.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorLSHR) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 38
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 202
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %cst0:_(s8) = G_CONSTANT i8 2
+   %cst0_splat:_(<2 x s8>) = G_BUILD_VECTOR %cst0, %cst0
+   %lshr0:_(<2 x s8>) = G_LSHR %val0, %cst0_splat
+   %copy_lshr0:_(<2 x s8>) = COPY %lshr0
+
+   %mask2:_(s8) = G_CONSTANT i8 204
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 18
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %lshr1:_(<2 x s8>) = G_LSHR %val1, %cst0_splat
+   %copy_lshr1:_(<2 x s8>) = COPY %lshr1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
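+  // val0 has One=0xCA/Zero=0x11; LSHR shifts in known zeros:
+  // One=0x32, Zero=0xC4.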
+  EXPECT_EQ(0x32u, Res0.One.getZExtValue());
+  EXPECT_EQ(0xC4u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
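+  // val1 has One=0x12/Zero=0x21, giving One=0x04, Zero=0xC8 after the shift.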
+  EXPECT_EQ(0x04u, Res1.One.getZExtValue());
+  EXPECT_EQ(0xC8u, Res1.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSHL) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 51
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 72
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val:_(<2 x s8>) = G_OR %tmp, %mask1_splat
+   %cst:_(s8) = G_CONSTANT i8 3
+   %cst_splat:_(<2 x s8>) = G_BUILD_VECTOR %cst, %cst
+   %shl:_(<2 x s8>) = G_SHL %val, %cst_splat
+   %copy_shl:_(<2 x s8>) = COPY %shl
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
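+  // val has One=0x48/Zero=0x84; SHL by 3 makes the low three bits known
+  // zero: One=0x40, Zero=0x27.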
+  EXPECT_EQ(0x40u, Res.One.getZExtValue());
+  EXPECT_EQ(0x27u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorADD) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s16>) = G_LOAD %ptr(p0) :: (load 4)
+   %mask0:_(s16) = G_CONSTANT i16 4642
+   %mask0_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s16) = G_CONSTANT i16 9536
+   %mask1_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s16>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s16>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s16) = G_CONSTANT i16 4096
+   %mask2_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s16) = G_CONSTANT i16 371
+   %mask3_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s16>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s16>) = G_OR %tmp1, %mask3_splat
+   %add:_(<2 x s16>) = G_ADD %val0, %val1
+   %copy_add:_(<2 x s16>) = COPY %add
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
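+  // G_ADD combines the operands' known bits through the carry analysis in
+  // KnownBits::computeForAddSub.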
+  EXPECT_EQ(0x0091u, Res.One.getZExtValue());
+  EXPECT_EQ(0x8108u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSUB) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s16>) = G_LOAD %ptr(p0) :: (load 4)
+   %mask0:_(s16) = G_CONSTANT i16 4642
+   %mask0_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s16) = G_CONSTANT i16 9536
+   %mask1_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s16>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s16>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s16) = G_CONSTANT i16 4096
+   %mask2_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s16) = G_CONSTANT i16 371
+   %mask3_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s16>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s16>) = G_OR %tmp1, %mask3_splat
+   %sub:_(<2 x s16>) = G_SUB %val0, %val1
+   %copy_sub:_(<2 x s16>) = COPY %sub
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
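+  // G_SUB goes through the same carry analysis as G_ADD.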
+  EXPECT_EQ(0x01CDu, Res.One.getZExtValue());
+  EXPECT_EQ(0xC810u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorMUL) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s16>) = G_LOAD %ptr(p0) :: (load 4)
+   %mask0:_(s16) = G_CONSTANT i16 4
+   %mask0_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s16) = G_CONSTANT i16 18
+   %mask1_splat:_(<2 x s16>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp:_(<2 x s16>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s16>) = G_OR %tmp, %mask1_splat
+   %cst:_(s16) = G_CONSTANT i16 12
+   %cst_splat:_(<2 x s16>) = G_BUILD_VECTOR %cst, %cst
+   %mul:_(<2 x s16>) = G_MUL %val0, %cst_splat
+   %copy_mul:_(<2 x s16>) = COPY %mul
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
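+  // val0 is 18 or 22; multiplied by 12 the product is 216 or 264, so bit 3
+  // is known one and bits 0-2 and 9-15 are known zero.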
+  EXPECT_EQ(0x0008u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFE07u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSelect) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 24
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 224
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 146
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 36
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %cond:_(s1) = G_CONSTANT i1 false
+   %cond_splat:_(<2 x s1>) = G_BUILD_VECTOR %cond, %cond
+   %select:_(<2 x s8>) = G_SELECT %cond_splat, %val0, %val1
+   %copy_select:_(<2 x s8>) = COPY %select
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(SrcReg);
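+  // The constant condition is not folded; the result keeps the bits common
+  // to val0 (One=0xE0/Zero=0x07) and val1 (One=0x24/Zero=0x49).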
+  EXPECT_EQ(0x20u, Res.One.getZExtValue());
+  EXPECT_EQ(0x01u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestVectorSignBitIsZero) {
+  setUp();
+  if (!TM)
+    return;
+
+  const LLT V2S32 = LLT::vector(2, 32);
+  // Vector buildConstant creates a splat G_BUILD_VECTOR instruction.
+  auto SignBit = B.buildConstant(V2S32, 0x80000000);
+  auto Zero = B.buildConstant(V2S32, 0);
+
+  const LLT S32 = LLT::scalar(32);
+  auto NonSplat =
+      B.buildBuildVector(V2S32, {B.buildConstant(S32, 1).getReg(0),
+                                 B.buildConstant(S32, 2).getReg(0)});
+  auto NonSplat2 =
+      B.buildBuildVector(V2S32, {B.buildConstant(S32, 0x80000000).getReg(0),
+                                 B.buildConstant(S32, 0x80000004).getReg(0)});
+  // The sign bit is set in elt 0 but clear in elt 1. GISelKnownBits takes
+  // the bits common to all elements, so signBitIsZero is false.
+  auto NonSplat3 =
+      B.buildBuildVector(V2S32, {B.buildConstant(S32, 0x80000000).getReg(0),
+                                 B.buildConstant(S32, 0x8).getReg(0)});
+  GISelKnownBits KnownBits(*MF);
+
+  EXPECT_TRUE(KnownBits.signBitIsZero(Zero.getReg(0)));
+  EXPECT_FALSE(KnownBits.signBitIsZero(SignBit.getReg(0)));
+  EXPECT_TRUE(KnownBits.signBitIsZero(NonSplat.getReg(0)));
+  EXPECT_FALSE(KnownBits.signBitIsZero(NonSplat2.getReg(0)));
+  EXPECT_FALSE(KnownBits.signBitIsZero(NonSplat3.getReg(0)));
+}
+
+TEST_F(AArch64GISelMITest, TestVectorNumSignBitsConstant) {
+  StringRef MIRString = R"(
+   %3:_(s8) = G_CONSTANT i8 1
+   %4:_(<2 x s8>) = G_BUILD_VECTOR %3:_(s8), %3:_(s8)
+   %5:_(<2 x s8>) = COPY %4
+
+   %6:_(s8) = G_CONSTANT i8 -1
+   %7:_(<2 x s8>) = G_BUILD_VECTOR %6:_(s8), %6:_(s8)
+   %8:_(<2 x s8>) = COPY %7
+
+   %9:_(s8) = G_CONSTANT i8 127
+   %10:_(<2 x s8>) = G_BUILD_VECTOR %9:_(s8), %9:_(s8)
+   %11:_(<2 x s8>) = COPY %10
+
+   %12:_(s8) = G_CONSTANT i8 32
+   %13:_(<2 x s8>) = G_BUILD_VECTOR %12:_(s8), %12:_(s8)
+   %14:_(<2 x s8>) = COPY %13
+
+   %15:_(s8) = G_CONSTANT i8 -32
+   %16:_(<2 x s8>) = G_BUILD_VECTOR %15:_(s8), %15:_(s8)
+   %17:_(<2 x s8>) = COPY %16
+
+   %18:_(<2 x s8>) = G_BUILD_VECTOR %6:_(s8), %15:_(s8)
+   %19:_(<2 x s8>) = COPY %18
+
+   %20:_(<2 x s8>) = G_BUILD_VECTOR %12:_(s8), %15:_(s8)
+   %21:_(<2 x s8>) = COPY %20
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg1 = Copies[Copies.size() - 7];
+  Register CopyRegNeg1 = Copies[Copies.size() - 6];
+  Register CopyReg127 = Copies[Copies.size() - 5];
+  Register CopyReg32 = Copies[Copies.size() - 4];
+  Register CopyRegNeg32 = Copies[Copies.size() - 3];
+  Register NonSplatSameSign = Copies[Copies.size() - 2];
+  Register NonSplatDifferentSign = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
+  // When all demanded elements are known to have the same sign, the common
+  // bits effectively give the smallest NumSignBits over all the elements.
+  // Otherwise the default value 1 is returned.
+  EXPECT_EQ(7u, Info.computeNumSignBits(CopyReg1));
+  EXPECT_EQ(8u, Info.computeNumSignBits(CopyRegNeg1));
+  EXPECT_EQ(1u, Info.computeNumSignBits(CopyReg127));
+  EXPECT_EQ(2u, Info.computeNumSignBits(CopyReg32));
+  EXPECT_EQ(3u, Info.computeNumSignBits(CopyRegNeg32));
+  EXPECT_EQ(3u, Info.computeNumSignBits(NonSplatSameSign));
+  EXPECT_EQ(1u, Info.computeNumSignBits(NonSplatDifferentSign));
+}
+
+TEST_F(AArch64GISelMITest, TestVectorNumSignBitsSext) {
+  StringRef MIRString = R"(
+   %3:_(p0) = G_IMPLICIT_DEF
+   %4:_(<2 x s8>) = G_LOAD %3 :: (load 2)
+   %5:_(<2 x s32>) = G_SEXT %4
+   %6:_(<2 x s32>) = COPY %5
+
+   %7:_(s8) = G_CONSTANT i8 -1
+   %8:_(<2 x s8>) = G_BUILD_VECTOR %7:_(s8), %7:_(s8)
+   %9:_(<2 x s32>) = G_SEXT %8
+   %10:_(<2 x s32>) = COPY %9
+
+   %11:_(s8) = G_CONSTANT i8 -10
+   %12:_(<2 x s8>) = G_BUILD_VECTOR %7:_(s8), %11:_(s8)
+   %13:_(<2 x s32>) = G_SEXT %12
+   %14:_(<2 x s32>) = COPY %13
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+  Register CopySextLoad = Copies[Copies.size() - 3];
+  Register CopySextNeg1 = Copies[Copies.size() - 2];
+  Register CopySextNonSplat = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
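+  // G_SEXT from <2 x s8> to <2 x s32> adds 24 sign bits. The non-splat
+  // <-1, -10> has 4 common sign bits at s8, hence 28 after the extension.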
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopySextLoad));
+  EXPECT_EQ(32u, Info.computeNumSignBits(CopySextNeg1));
+  EXPECT_EQ(28u, Info.computeNumSignBits(CopySextNonSplat));
+}
+
+TEST_F(AArch64GISelMITest, TestVectorNumSignBitsSextInReg) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %load2x4:_(<2 x s32>) = G_LOAD %ptr :: (load 8)
+
+   %inreg7:_(<2 x s32>) = G_SEXT_INREG %load2x4, 7
+   %copy_inreg7:_(<2 x s32>) = COPY %inreg7
+
+   %inreg8:_(<2 x s32>) = G_SEXT_INREG %load2x4, 8
+   %copy_inreg8:_(<2 x s32>) = COPY %inreg8
+
+   %inreg9:_(<2 x s32>) = G_SEXT_INREG %load2x4, 9
+   %copy_inreg9:_(<2 x s32>) = COPY %inreg9
+
+   %inreg31:_(<2 x s32>) = G_SEXT_INREG %load2x4, 31
+   %copy_inreg31:_(<2 x s32>) = COPY %inreg31
+
+   %load2x1:_(<2 x s8>) = G_LOAD %ptr :: (load 2)
+   %sext_load2x1:_(<2 x s32>) = G_SEXT %load2x1
+
+   %inreg6_sext:_(<2 x s32>) = G_SEXT_INREG %sext_load2x1, 6
+   %copy_inreg6_sext:_(<2 x s32>) = COPY %inreg6_sext
+
+   %inreg7_sext:_(<2 x s32>) = G_SEXT_INREG %sext_load2x1, 7
+   %copy_inreg7_sext:_(<2 x s32>) = COPY %inreg7_sext
+
+   %inreg8_sext:_(<2 x s32>) = G_SEXT_INREG %sext_load2x1, 8
+   %copy_inreg8_sext:_(<2 x s32>) = COPY %inreg8_sext
+
+   %inreg9_sext:_(<2 x s32>) = G_SEXT_INREG %sext_load2x1, 9
+   %copy_inreg9_sext:_(<2 x s32>) = COPY %inreg9_sext
+
+   %inreg31_sext:_(<2 x s32>) = G_SEXT_INREG %sext_load2x1, 31
+   %copy_inreg31_sext:_(<2 x s32>) = COPY %inreg31_sext
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyInReg7 = Copies[Copies.size() - 9];
+  Register CopyInReg8 = Copies[Copies.size() - 8];
+  Register CopyInReg9 = Copies[Copies.size() - 7];
+  Register CopyInReg31 = Copies[Copies.size() - 6];
+
+  Register CopyInReg6Sext = Copies[Copies.size() - 5];
+  Register CopyInReg7Sext = Copies[Copies.size() - 4];
+  Register CopyInReg8Sext = Copies[Copies.size() - 3];
+  Register CopyInReg9Sext = Copies[Copies.size() - 2];
+  Register CopyInReg31Sext = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
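+  // G_SEXT_INREG from b bits of an s32 gives 32 - b + 1 sign bits, unless
+  // the operand is already known to have more.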
+  EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
+  EXPECT_EQ(24u, Info.computeNumSignBits(CopyInReg9));
+  EXPECT_EQ(2u, Info.computeNumSignBits(CopyInReg31));
+
+  EXPECT_EQ(27u, Info.computeNumSignBits(CopyInReg6Sext));
+  EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg9Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg31Sext));
+}
+
+TEST_F(AArch64GISelMITest, TestNumSignBitsVectorAssertSext) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %load2x4:_(<2 x s32>) = G_LOAD %ptr :: (load 8)
+
+   %assert_sext1:_(<2 x s32>) = G_ASSERT_SEXT %load2x4, 1
+   %copy_assert_sext1:_(<2 x s32>) = COPY %assert_sext1
+
+   %assert_sext7:_(<2 x s32>) = G_ASSERT_SEXT %load2x4, 7
+   %copy_assert_sext7:_(<2 x s32>) = COPY %assert_sext7
+
+   %assert_sext8:_(<2 x s32>) = G_ASSERT_SEXT %load2x4, 8
+   %copy_assert_sext8:_(<2 x s32>) = COPY %assert_sext8
+
+   %assert_sext9:_(<2 x s32>) = G_ASSERT_SEXT %load2x4, 9
+   %copy_assert_sext9:_(<2 x s32>) = COPY %assert_sext9
+
+   %assert_sext31:_(<2 x s32>) = G_ASSERT_SEXT %load2x4, 31
+   %copy_assert_sext31:_(<2 x s32>) = COPY %assert_sext31
+
+   %load2x1:_(<2 x s8>) = G_LOAD %ptr :: (load 2)
+   %sext_load2x1:_(<2 x s32>) = G_SEXT %load2x1
+
+   %assert_sext6_sext:_(<2 x s32>) = G_ASSERT_SEXT %sext_load2x1, 6
+   %copy_assert_sext6_sext:_(<2 x s32>) = COPY %assert_sext6_sext
+
+   %assert_sext7_sext:_(<2 x s32>) = G_ASSERT_SEXT %sext_load2x1, 7
+   %copy_assert_sext7_sext:_(<2 x s32>) = COPY %assert_sext7_sext
+
+   %assert_sext8_sext:_(<2 x s32>) = G_ASSERT_SEXT %sext_load2x1, 8
+   %copy_assert_sext8_sext:_(<2 x s32>) = COPY %assert_sext8_sext
+
+   %assert_sext9_sext:_(<2 x s32>) = G_ASSERT_SEXT %sext_load2x1, 9
+   %copy_assert_sext9_sext:_(<2 x s32>) = COPY %assert_sext9_sext
+
+   %assert_sext31_sext:_(<2 x s32>) = G_ASSERT_SEXT %sext_load2x1, 31
+   %copy_assert_sext31_sext:_(<2 x s32>) = COPY %assert_sext31_sext
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyInReg1 = Copies[Copies.size() - 10];
+  Register CopyInReg7 = Copies[Copies.size() - 9];
+  Register CopyInReg8 = Copies[Copies.size() - 8];
+  Register CopyInReg9 = Copies[Copies.size() - 7];
+  Register CopyInReg31 = Copies[Copies.size() - 6];
+
+  Register CopyInReg6Sext = Copies[Copies.size() - 5];
+  Register CopyInReg7Sext = Copies[Copies.size() - 4];
+  Register CopyInReg8Sext = Copies[Copies.size() - 3];
+  Register CopyInReg9Sext = Copies[Copies.size() - 2];
+  Register CopyInReg31Sext = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
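+  // G_ASSERT_SEXT is expected to give the same sign-bit counts as
+  // G_SEXT_INREG.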
+  EXPECT_EQ(32u, Info.computeNumSignBits(CopyInReg1));
+  EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8));
+  EXPECT_EQ(24u, Info.computeNumSignBits(CopyInReg9));
+  EXPECT_EQ(2u, Info.computeNumSignBits(CopyInReg31));
+
+  EXPECT_EQ(27u, Info.computeNumSignBits(CopyInReg6Sext));
+  EXPECT_EQ(26u, Info.computeNumSignBits(CopyInReg7Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg8Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg9Sext));
+  EXPECT_EQ(25u, Info.computeNumSignBits(CopyInReg31Sext));
+}
+
+TEST_F(AArch64GISelMITest, TestVectorNumSignBitsTrunc) {
+  StringRef MIRString = R"(
+   %3:_(p0) = G_IMPLICIT_DEF
+   %4:_(<2 x s32>) = G_LOAD %3 :: (load 8)
+   %5:_(<2 x s8>) = G_TRUNC %4
+   %6:_(<2 x s8>) = COPY %5
+
+   %7:_(s32) = G_CONSTANT i32 -1
+   %8:_(<2 x s32>) = G_BUILD_VECTOR %7:_(s32), %7:_(s32)
+   %9:_(<2 x s8>) = G_TRUNC %8
+   %10:_(<2 x s8>) = COPY %9
+
+   %11:_(s32) = G_CONSTANT i32 7
+   %12:_(<2 x s32>) = G_BUILD_VECTOR %11:_(s32), %11:_(s32)
+   %13:_(<2 x s8>) = G_TRUNC %12
+   %14:_(<2 x s8>) = COPY %13
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyTruncLoad = Copies[Copies.size() - 3];
+  Register CopyTruncNeg1 = Copies[Copies.size() - 2];
+  Register CopyTrunc7 = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
+  EXPECT_EQ(1u, Info.computeNumSignBits(CopyTruncLoad));
+  EXPECT_EQ(8u, Info.computeNumSignBits(CopyTruncNeg1));
+  EXPECT_EQ(5u, Info.computeNumSignBits(CopyTrunc7));
+}
+
+TEST_F(AMDGPUGISelMITest, TestVectorIsKnownToBeAPowerOfTwo) {
+  StringRef MIRString = R"(
+  %zero:_(s32) = G_CONSTANT i32 0
+  %zero_splat:_(<2 x s32>) = G_BUILD_VECTOR %zero:_(s32), %zero:_(s32)
+  %one:_(s32) = G_CONSTANT i32 1
+  %one_splat:_(<2 x s32>) = G_BUILD_VECTOR %one:_(s32), %one:_(s32)
+  %two:_(s32) = G_CONSTANT i32 2
+  %two_splat:_(<2 x s32>) = G_BUILD_VECTOR %two:_(s32), %two:_(s32)
+  %three:_(s32) = G_CONSTANT i32 3
+  %three_splat:_(<2 x s32>) = G_BUILD_VECTOR %three:_(s32), %three:_(s32)
+  %five:_(s32) = G_CONSTANT i32 5
+  %five_splat:_(<2 x s32>) = G_BUILD_VECTOR %five:_(s32), %five:_(s32)
+  %copy_zero_splat:_(<2 x s32>) = COPY %zero_splat
+  %copy_one_splat:_(<2 x s32>) = COPY %one_splat
+  %copy_two_splat:_(<2 x s32>) = COPY %two_splat
+  %copy_three_splat:_(<2 x s32>) = COPY %three_splat
+
+  %trunc_two_splat:_(<2 x s1>) = G_TRUNC %two_splat
+  %trunc_three_splat:_(<2 x s1>) = G_TRUNC %three_splat
+  %trunc_five_splat:_(<2 x s1>) = G_TRUNC %five_splat
+
+  %copy_trunc_two_splat:_(<2 x s1>) = COPY %trunc_two_splat
+  %copy_trunc_three_splat:_(<2 x s1>) = COPY %trunc_three_splat
+  %copy_trunc_five_splat:_(<2 x s1>) = COPY %trunc_five_splat
+
+  %ptr:_(p1) = G_IMPLICIT_DEF
+  %shift_amt:_(<2 x s32>) = G_LOAD %ptr :: (load 8, addrspace 1)
+
+  %shl_1:_(<2 x s32>) = G_SHL %one_splat, %shift_amt
+  %copy_shl_1:_(<2 x s32>) = COPY %shl_1
+
+  %shl_2:_(<2 x s32>) = G_SHL %two_splat, %shift_amt
+  %copy_shl_2:_(<2 x s32>) = COPY %shl_2
+
+  %not_sign_mask:_(<2 x s32>) = G_LOAD %ptr :: (load 8, addrspace 1)
+  %sign_mask:_(s32) = G_CONSTANT i32 -2147483648
+  %sign_mask_splat:_(<2 x s32>) = G_BUILD_VECTOR %sign_mask:_(s32), %sign_mask:_(s32)
+
+  %lshr_not_sign_mask:_(<2 x s32>) = G_LSHR %not_sign_mask, %shift_amt
+  %copy_lshr_not_sign_mask:_(<2 x s32>) = COPY %lshr_not_sign_mask
+
+  %lshr_sign_mask:_(<2 x s32>) = G_LSHR %sign_mask_splat, %shift_amt
+  %copy_lshr_sign_mask:_(<2 x s32>) = COPY %lshr_sign_mask
+
+  %or_pow2:_(<2 x s32>) = G_OR %zero_splat, %two_splat
+  %copy_or_pow2:_(<2 x s32>) = COPY %or_pow2
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  GISelKnownBits KB(*MF);
+
+  Register CopyZero = Copies[Copies.size() - 12];
+  Register CopyOne = Copies[Copies.size() - 11];
+  Register CopyTwo = Copies[Copies.size() - 10];
+  Register CopyThree = Copies[Copies.size() - 9];
+  Register CopyTruncTwo = Copies[Copies.size() - 8];
+  Register CopyTruncThree = Copies[Copies.size() - 7];
+  Register CopyTruncFive = Copies[Copies.size() - 6];
+
+  Register CopyShl1 = Copies[Copies.size() - 5];
+  Register CopyShl2 = Copies[Copies.size() - 4];
+
+  Register CopyLShrNotSignMask = Copies[Copies.size() - 3];
+  Register CopyLShrSignMask = Copies[Copies.size() - 2];
+  Register CopyOrPow2 = Copies[Copies.size() - 1];
+
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyZero, *MRI, &KB));
+  EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOne, *MRI, &KB));
+  EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTwo, *MRI, &KB));
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyThree, *MRI, &KB));
+
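+  // Truncation to s1 keeps only bit 0: 3 and 5 become 1 (a power of two)
+  // while 2 becomes 0.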
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyTruncTwo, *MRI, &KB));
+  EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncThree, *MRI, &KB));
+  EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyTruncFive, *MRI, &KB));
+  // TODO: check for vector(splat) shift amount.
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl1, *MRI, &KB));
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyShl2, *MRI, &KB));
+
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrNotSignMask, *MRI, &KB));
+  EXPECT_FALSE(isKnownToBeAPowerOfTwo(CopyLShrSignMask, *MRI, &KB));
+  EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &KB));
+}
+
+TEST_F(AArch64GISelMITest, TestVectorMetadata) {
+  StringRef MIRString = R"(
+   %imp:_(p0) = G_IMPLICIT_DEF
+   %load:_(<2 x s8>) = G_LOAD %imp(p0) :: (load 2)
+   %ext:_(<2 x s32>) = G_ZEXT %load(<2 x s8>)
+   %cst_elt:_(s32) = G_CONSTANT i32 1
+   %cst:_(<2 x s32>) = G_BUILD_VECTOR %cst_elt:_(s32), %cst_elt:_(s32)
+   %and:_(<2 x s32>) = G_AND %ext, %cst
+   %copy:_(<2 x s32>) = COPY %and(<2 x s32>)
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+  Register SrcReg = FinalCopy->getOperand(1).getReg();
+
+  MachineInstr *And = MRI->getVRegDef(SrcReg);
+  MachineInstr *Ext = MRI->getVRegDef(And->getOperand(1).getReg());
+  MachineInstr *Load = MRI->getVRegDef(Ext->getOperand(1).getReg());
+  IntegerType *Int8Ty = Type::getInt8Ty(Context);
+
+  Metadata *LowAndHigh[] = {
+      ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 0)),
+      ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 2))};
+  auto *NewMDNode = MDNode::get(Context, LowAndHigh);
+  const MachineMemOperand *OldMMO = *Load->memoperands_begin();
+  MachineMemOperand NewMMO(OldMMO->getPointerInfo(), OldMMO->getFlags(),
+                           OldMMO->getSize(), OldMMO->getAlign(),
+                           OldMMO->getAAInfo(), NewMDNode);
+  MachineIRBuilder MIB(*Load);
+  MIB.buildLoad(Load->getOperand(0), Load->getOperand(1), NewMMO);
+  Load->eraseFromParent();
+
+  GISelKnownBits Info(*MF);
+  KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
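+  // The attached range metadata [0, 2) means only bit 0 of the loaded value
+  // can be set.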
+
+  EXPECT_TRUE(Res.One.isNullValue());
+
+  APInt Mask(Res.getBitWidth(), 1);
+  Mask.flipAllBits();
+  EXPECT_EQ(Mask.getZExtValue(), Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestVectorKnownBitsExt) {
+  StringRef MIRString = R"(
+   %c1:_(s16) = G_CONSTANT i16 1
+   %c1_splat:_(<2 x s16>) = G_BUILD_VECTOR %c1:_(s16), %c1:_(s16)
+   %x:_(<2 x s16>) = G_IMPLICIT_DEF
+   %y:_(<2 x s16>) = G_AND %x, %c1_splat
+   %anyext:_(<2 x s32>) = G_ANYEXT %y(<2 x s16>)
+   %r1:_(<2 x s32>) = COPY %anyext
+   %zext:_(<2 x s32>) = G_ZEXT %y(<2 x s16>)
+   %r2:_(<2 x s32>) = COPY %zext
+   %sext:_(<2 x s32>) = G_SEXT %y(<2 x s16>)
+   %r3:_(<2 x s32>) = COPY %sext
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+  Register CopyRegAny = Copies[Copies.size() - 3];
+  Register CopyRegZ = Copies[Copies.size() - 2];
+  Register CopyRegS = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
+  MachineInstr *Copy;
+  Register SrcReg;
+  KnownBits Res;
+
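+  // %y has at most bit 0 set. G_ANYEXT leaves the extended bits unknown,
+  // while G_ZEXT and G_SEXT (sign bit known zero) make them known zero.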
+  Copy = MRI->getVRegDef(CopyRegAny);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)32, Res.getBitWidth());
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0x0000fffe, Res.Zero.getZExtValue());
+
+  Copy = MRI->getVRegDef(CopyRegZ);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)32, Res.getBitWidth());
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0xfffffffe, Res.Zero.getZExtValue());
+
+  Copy = MRI->getVRegDef(CopyRegS);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ((uint64_t)32, Res.getBitWidth());
+  EXPECT_EQ((uint64_t)0, Res.One.getZExtValue());
+  EXPECT_EQ((uint64_t)0xfffffffe, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSextInReg) {
+  StringRef MIRString = R"(
+   ; 000...0001
+   %one:_(s32) = G_CONSTANT i32 1
+   %one_splat:_(<2 x s32>) = G_BUILD_VECTOR %one:_(s32), %one:_(s32)
+
+   ; 000...0010
+   %two:_(s32) = G_CONSTANT i32 2
+   %two_splat:_(<2 x s32>) = G_BUILD_VECTOR %two:_(s32), %two:_(s32)
+
+   ; 000...1010
+   %ten:_(s32) = G_CONSTANT i32 10
+   %ten_splat:_(<2 x s32>) = G_BUILD_VECTOR %ten:_(s32), %ten:_(s32)
+
+   ; ???...????
+   %x0:_(<2 x s32>) = COPY $x0
+
+   ; ???...?1?
+   %or:_(<2 x s32>) = G_OR %x0, %two_splat
+
+   ; All bits are known.
+   %inreg1:_(<2 x s32>) = G_SEXT_INREG %one_splat, 1
+   %copy_inreg1:_(<2 x s32>) = COPY %inreg1
+
+   ; All bits unknown
+   %inreg2:_(<2 x s32>) = G_SEXT_INREG %or, 1
+   %copy_inreg2:_(<2 x s32>) = COPY %inreg2
+
+   ; Extending from the only (known) set bit
+   ; 111...11?
+   %inreg3:_(<2 x s32>) = G_SEXT_INREG %or, 2
+   %copy_inreg3:_(<2 x s32>) = COPY %inreg3
+
+   ; Extending from a known set bit, overwriting all of the high set bits.
+   ; 111...1110
+   %inreg4:_(<2 x s32>) = G_SEXT_INREG %ten_splat, 2
+   %copy_inreg4:_(<2 x s32>) = COPY %inreg4
+
+)";
+  setUp(MIRString);
+  if (!TM)
+    return;
+  GISelKnownBits Info(*MF);
+  KnownBits Res;
+  auto GetKB = [&](unsigned Idx) {
+    Register CopyReg = Copies[Idx];
+    auto *Copy = MRI->getVRegDef(CopyReg);
+    return Info.getKnownBits(Copy->getOperand(1).getReg());
+  };
+
+  Res = GetKB(Copies.size() - 4);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_TRUE(Res.isAllOnes());
+
+  Res = GetKB(Copies.size() - 3);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_TRUE(Res.isUnknown());
+
+  Res = GetKB(Copies.size() - 2);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_EQ(0xFFFFFFFEu, Res.One.getZExtValue());
+  EXPECT_EQ(0u, Res.Zero.getZExtValue());
+
+  Res = GetKB(Copies.size() - 1);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_EQ(0xFFFFFFFEu, Res.One.getZExtValue());
+  EXPECT_EQ(1u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertSext) {
+  StringRef MIRString = R"(
+   ; 000...0001
+   %one:_(s32) = G_CONSTANT i32 1
+   %one_splat:_(<2 x s32>) = G_BUILD_VECTOR %one, %one
+
+   ; 000...0010
+   %two:_(s32) = G_CONSTANT i32 2
+   %two_splat:_(<2 x s32>) = G_BUILD_VECTOR %two, %two
+
+   ; 000...1010
+   %ten:_(s32) = G_CONSTANT i32 10
+   %ten_splat:_(<2 x s32>) = G_BUILD_VECTOR %ten, %ten
+
+   ; ???...????
+   %x0:_(<2 x s32>) = COPY $x0
+
+   ; ???...?1?
+   %or:_(<2 x s32>) = G_OR %x0, %two_splat
+
+   ; All bits are known.
+   %assert_sext1:_(<2 x s32>) = G_ASSERT_SEXT %one_splat, 1
+   %copy_assert_sext1:_(<2 x s32>) = COPY %assert_sext1
+
+   ; All bits unknown
+   %assert_sext2:_(<2 x s32>) = G_ASSERT_SEXT %or, 1
+   %copy_assert_sext2:_(<2 x s32>) = COPY %assert_sext2
+
+   ; Extending from the only (known) set bit
+   ; 111...11?
+   %assert_sext3:_(<2 x s32>) = G_ASSERT_SEXT %or, 2
+   %copy_assert_sext3:_(<2 x s32>) = COPY %assert_sext3
+
+   ; Extending from a known set bit, overwriting all of the high set bits.
+   ; 111...1110
+   %assert_sext4:_(<2 x s32>) = G_ASSERT_SEXT %ten_splat, 2
+   %copy_assert_sext4:_(<2 x s32>) = COPY %assert_sext4
+)";
+  setUp(MIRString);
+  if (!TM)
+    return;
+  GISelKnownBits Info(*MF);
+  KnownBits Res;
+  auto GetKB = [&](unsigned Idx) {
+    Register CopyReg = Copies[Idx];
+    auto *Copy = MRI->getVRegDef(CopyReg);
+    return Info.getKnownBits(Copy->getOperand(1).getReg());
+  };
+
+  // Every bit is known to be a 1.
+  Res = GetKB(Copies.size() - 4);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_TRUE(Res.isAllOnes());
+
+  // All bits are unknown
+  Res = GetKB(Copies.size() - 3);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_TRUE(Res.isUnknown());
+
+  // Extending from the only known set bit
+  // 111...11?
+  Res = GetKB(Copies.size() - 2);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_EQ(0xFFFFFFFEu, Res.One.getZExtValue());
+  EXPECT_EQ(0u, Res.Zero.getZExtValue());
+
+  // Extending from a known set bit, overwriting all of the high set bits.
+  // 111...1110
+  Res = GetKB(Copies.size() - 1);
+  EXPECT_EQ(32u, Res.getBitWidth());
+  EXPECT_EQ(0xFFFFFFFEu, Res.One.getZExtValue());
+  EXPECT_EQ(1u, Res.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestVectorKnownBitsBSwapBitReverse) {
+  StringRef MIRString = R"(
+   %const:_(s32) = G_CONSTANT i32 287454020
+   %const_splat:_(<2 x s32>) = G_BUILD_VECTOR %const:_(s32), %const:_(s32)
+   %bswap:_(<2 x s32>) = G_BSWAP %const_splat
+   %bitreverse:_(<2 x s32>) = G_BITREVERSE %const_splat
+   %copy_bswap:_(<2 x s32>) = COPY %bswap
+   %copy_bitreverse:_(<2 x s32>) = COPY %bitreverse
+)";
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  // bswap(0x11223344) == 0x44332211 and bitreverse(0x11223344) == 0x22CC4488.
+  const uint32_t ByteSwappedVal = 0x44332211;
+  const uint32_t BitReversedVal = 0x22CC4488;
+
+  Register CopyBSwap = Copies[Copies.size() - 2];
+  Register CopyBitReverse = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
+
+  KnownBits BSwapKnown = Info.getKnownBits(CopyBSwap);
+  EXPECT_EQ(32u, BSwapKnown.getBitWidth());
+  EXPECT_EQ(ByteSwappedVal, BSwapKnown.One.getZExtValue());
+  EXPECT_EQ(~ByteSwappedVal, BSwapKnown.Zero.getZExtValue());
+
+  KnownBits BitReverseKnown = Info.getKnownBits(CopyBitReverse);
+  EXPECT_EQ(32u, BitReverseKnown.getBitWidth());
+  EXPECT_EQ(BitReversedVal, BitReverseKnown.One.getZExtValue());
+  EXPECT_EQ(~BitReversedVal, BitReverseKnown.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorUMAX) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 10
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 1
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 3
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 12
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %umax0:_(<2 x s8>) = G_UMAX %val0, %val1
+   %copy_umax0:_(<2 x s8>) = COPY %umax0
+
+   %mask4:_(s8) = G_CONSTANT i8 14
+   %mask4_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask4, %mask4
+   %mask5:_(s8) = G_CONSTANT i8 2
+   %mask5_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask5, %mask5
+   %tmp3:_(<2 x s8>) = G_AND %unknown, %mask4_splat
+   %val3:_(<2 x s8>) = G_OR %tmp3, %mask5_splat
+   %mask6:_(s8) = G_CONSTANT i8 4
+   %mask6_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask6, %mask6
+   %mask7:_(s8) = G_CONSTANT i8 11
+   %mask7_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask7, %mask7
+   %tmp4:_(<2 x s8>) = G_AND %unknown, %mask6_splat
+   %val4:_(<2 x s8>) = G_OR %tmp4, %mask7_splat
+   %umax1:_(<2 x s8>) = G_UMAX %val3, %val4
+   %copy_umax1:_(<2 x s8>) = COPY %umax1
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 2];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
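+  // val1 is always 0x0C-0x0F while val0 is at most 0x0B, so the UMAX takes
+  // val1's known bits.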
+  EXPECT_EQ(0x0Cu, Res0.One.getZExtValue());
+  EXPECT_EQ(0xF0u, Res0.Zero.getZExtValue());
+
+  Register CopyReg1 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy1 = MRI->getVRegDef(CopyReg1);
+  Register SrcReg1 = FinalCopy1->getOperand(1).getReg();
+  KnownBits Res1 = Info.getKnownBits(SrcReg1);
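+  // The possible results are 11, 14 and 15: bits 1 and 3 are known one and
+  // the high nibble is known zero.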
+  EXPECT_EQ(0x0Au, Res1.One.getZExtValue());
+  EXPECT_EQ(0xF0u, Res1.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestVectorKnownBitsUMax) {
+  StringRef MIRString = R"(
+   %val:_(<2 x s32>) = COPY $x0
+   %zext:_(<2 x s64>) = G_ZEXT %val
+   %const:_(s64) = G_CONSTANT i64 -256
+   %const_splat:_(<2 x s64>) = G_BUILD_VECTOR %const:_(s64), %const:_(s64)
+   %umax:_(<2 x s64>) = G_UMAX %zext, %const_splat
+   %copy_umax:_(<2 x s64>) = COPY %umax
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyUMax = Copies[Copies.size() - 1];
+  GISelKnownBits Info(*MF);
+
+  KnownBits KnownUmax = Info.getKnownBits(CopyUMax);
+  EXPECT_EQ(64u, KnownUmax.getBitWidth());
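+  // %zext is at most 0xFFFFFFFF, so the UMAX always equals the -256 splat.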
+  EXPECT_EQ(0xffu, KnownUmax.Zero.getZExtValue());
+  EXPECT_EQ(0xffffffffffffff00u, KnownUmax.One.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorUMIN) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 10
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 1
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 3
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 12
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %umin:_(<2 x s8>) = G_UMIN %val0, %val1
+   %copy_umin:_(<2 x s8>) = COPY %umin
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
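+  // val0 (at most 0x0B) is always below val1 (at least 0x0C), so the UMIN
+  // takes val0's known bits.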
+  EXPECT_EQ(0x01u, Res0.One.getZExtValue());
+  EXPECT_EQ(0xF4u, Res0.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSMAX) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 128
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 64
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 1
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 128
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %smax:_(<2 x s8>) = G_SMAX %val0, %val1
+   %copy_smax:_(<2 x s8>) = COPY %smax
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
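+  // As signed values val0 is 64 or -64 and val1 is -128 or -127, so the
+  // SMAX always picks val0: One=0x40, Zero=0x3F.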
+  EXPECT_EQ(0x40u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x3Fu, Res0.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorSMIN) {
+  StringRef MIRString = R"(
+   %ptr:_(p0) = G_IMPLICIT_DEF
+   %unknown:_(<2 x s8>) = G_LOAD %ptr(p0) :: (load 2)
+   %mask0:_(s8) = G_CONSTANT i8 128
+   %mask0_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask0, %mask0
+   %mask1:_(s8) = G_CONSTANT i8 64
+   %mask1_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask1, %mask1
+   %tmp0:_(<2 x s8>) = G_AND %unknown, %mask0_splat
+   %val0:_(<2 x s8>) = G_OR %tmp0, %mask1_splat
+   %mask2:_(s8) = G_CONSTANT i8 1
+   %mask2_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask2, %mask2
+   %mask3:_(s8) = G_CONSTANT i8 128
+   %mask3_splat:_(<2 x s8>) = G_BUILD_VECTOR %mask3, %mask3
+   %tmp1:_(<2 x s8>) = G_AND %unknown, %mask2_splat
+   %val1:_(<2 x s8>) = G_OR %tmp1, %mask3_splat
+   %smin:_(<2 x s8>) = G_SMIN %val0, %val1
+   %copy_smin:_(<2 x s8>) = COPY %smin
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyReg0 = Copies[Copies.size() - 1];
+  MachineInstr *FinalCopy0 = MRI->getVRegDef(CopyReg0);
+  Register SrcReg0 = FinalCopy0->getOperand(1).getReg();
+  GISelKnownBits Info(*MF);
+  KnownBits Res0 = Info.getKnownBits(SrcReg0);
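+  // The SMIN always picks val1 (-128 or -127): One=0x80, Zero=0x7E.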
+  EXPECT_EQ(0x80u, Res0.One.getZExtValue());
+  EXPECT_EQ(0x7Eu, Res0.Zero.getZExtValue());
+}
+
+TEST_F(AArch64GISelMITest, TestVectorInvalidQueries) {
+  StringRef MIRString = R"(
+   %src:_(<2 x s32>) = COPY $x0
+   %thirty2:_(s32) = G_CONSTANT i32 32
+   %thirty2_splat:_(<2 x s32>) = G_BUILD_VECTOR %thirty2:_(s32), %thirty2:_(s32)
+   %equalSized:_(<2 x s32>) = G_SHL %src, %thirty2_splat
+   %copy1:_(<2 x s32>) = COPY %equalSized
+   %thirty3:_(s32) = G_CONSTANT i32 33
+   %thirty3_splat:_(<2 x s32>) = G_BUILD_VECTOR %thirty3:_(s32), %thirty3:_(s32)
+   %biggerSized:_(<2 x s32>) = G_SHL %src, %thirty3_splat
+   %copy2:_(<2 x s32>) = COPY %biggerSized
+)";
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register EqSizedCopyReg = Copies[Copies.size() - 2];
+  MachineInstr *EqSizedCopy = MRI->getVRegDef(EqSizedCopyReg);
+  Register EqSizedShl = EqSizedCopy->getOperand(1).getReg();
+
+  Register BiggerSizedCopyReg = Copies[Copies.size() - 1];
+  MachineInstr *BiggerSizedCopy = MRI->getVRegDef(BiggerSizedCopyReg);
+  Register BiggerSizedShl = BiggerSizedCopy->getOperand(1).getReg();
+
+  GISelKnownBits Info(*MF);
+  KnownBits EqSizeRes = Info.getKnownBits(EqSizedShl);
+  KnownBits BiggerSizeRes = Info.getKnownBits(BiggerSizedShl);
+
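+  // Shift amounts of at least the element width leave nothing known.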
+  EXPECT_TRUE(EqSizeRes.One.isNullValue());
+  EXPECT_TRUE(EqSizeRes.Zero.isNullValue());
+
+  EXPECT_TRUE(BiggerSizeRes.One.isNullValue());
+  EXPECT_TRUE(BiggerSizeRes.Zero.isNullValue());
+}
+
+TEST_F(AArch64GISelMITest, TestKnownBitsVectorAssertZext) {
+  StringRef MIRString = R"(
+   %copy_x0:_(s64) = COPY $x0
+   %copy_x1:_(s64) = COPY $x1
+   %x0_x1:_(<2 x s64>) = G_BUILD_VECTOR %copy_x0, %copy_x1
+
+   %assert8:_(<2 x s64>) = G_ASSERT_ZEXT %x0_x1, 8
+   %copy_assert8:_(<2 x s64>) = COPY %assert8
+
+   %assert1:_(<2 x s64>) = G_ASSERT_ZEXT %x0_x1, 1
+   %copy_assert1:_(<2 x s64>) = COPY %assert1
+
+   %assert63:_(<2 x s64>) = G_ASSERT_ZEXT %x0_x1, 63
+   %copy_assert63:_(<2 x s64>) = COPY %assert63
+
+   %assert3:_(<2 x s64>) = G_ASSERT_ZEXT %x0_x1, 3
+   %copy_assert3:_(<2 x s64>) = COPY %assert3
+)";
+
+  setUp(MIRString);
+  if (!TM)
+    return;
+
+  Register CopyAssert8 = Copies[Copies.size() - 4];
+  Register CopyAssert1 = Copies[Copies.size() - 3];
+  Register CopyAssert63 = Copies[Copies.size() - 2];
+  Register CopyAssert3 = Copies[Copies.size() - 1];
+
+  GISelKnownBits Info(*MF);
+  MachineInstr *Copy;
+  Register SrcReg;
+  KnownBits Res;
+
+  // Assert zero-extension from an 8-bit value.
+  Copy = MRI->getVRegDef(CopyAssert8);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(64u, Res.getBitWidth());
+  EXPECT_EQ(0u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFFFFFFFFFFFFFF00u, Res.Zero.getZExtValue());
+
+  // Assert zero-extension from a 1-bit value.
+  Copy = MRI->getVRegDef(CopyAssert1);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(64u, Res.getBitWidth());
+  EXPECT_EQ(0u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFFFFFFFFFFFFFFFEu, Res.Zero.getZExtValue());
+
+  // Assert zero-extension from a 63-bit value.
+  Copy = MRI->getVRegDef(CopyAssert63);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(64u, Res.getBitWidth());
+  EXPECT_EQ(0u, Res.One.getZExtValue());
+  EXPECT_EQ(0x8000000000000000u, Res.Zero.getZExtValue());
+
+  // Assert zero-extension from a 3-bit value.
+  Copy = MRI->getVRegDef(CopyAssert3);
+  SrcReg = Copy->getOperand(1).getReg();
+  Res = Info.getKnownBits(SrcReg);
+  EXPECT_EQ(64u, Res.getBitWidth());
+  EXPECT_EQ(0u, Res.One.getZExtValue());
+  EXPECT_EQ(0xFFFFFFFFFFFFFFF8u, Res.Zero.getZExtValue());
+}

More information about the llvm-commits mailing list