[llvm] 8f49204 - [SelectionDAG] ComputeKnownBits - minimum leading/trailing zero bits in LSHR/SHL (PR44526)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 13 03:09:03 PST 2020


Author: Simon Pilgrim
Date: 2020-01-13T11:08:12Z
New Revision: 8f49204f26ea8856b870d4c2344b98f4b706bea0

URL: https://github.com/llvm/llvm-project/commit/8f49204f26ea8856b870d4c2344b98f4b706bea0
DIFF: https://github.com/llvm/llvm-project/commit/8f49204f26ea8856b870d4c2344b98f4b706bea0.diff

LOG: [SelectionDAG] ComputeKnownBits - minimum leading/trailing zero bits in LSHR/SHL (PR44526)

As detailed in https://blog.regehr.org/archives/1709, we don't make use of the known leading/trailing zeros of a shifted value in cases where the shift amount is unknown.

This patch adds support to SelectionDAG::computeKnownBits to use KnownBits::countMinTrailingZeros and KnownBits::countMinLeadingZeros to set the minimum guaranteed trailing/leading known zero bits for SHL and SRL respectively when the shift amount is unknown.
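
For intuition, here is a small standalone C++ check of the underlying fact (an illustration only, not part of the patch; the constants are arbitrary examples): shifting left by any in-range amount preserves the known trailing zero bits, and a logical shift right by any in-range amount preserves the known leading zero bits.

    // Illustration: known trailing/leading zeros survive SHL/LSHR by any amount.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t X = 0xFFFFFF00u; // at least 8 trailing zeros known
      const uint32_t Y = 0x00FFFFFFu; // at least 8 leading zeros known
      for (uint32_t Shift = 0; Shift < 32; ++Shift) {
        assert(((X << Shift) & 0x000000FFu) == 0); // low 8 bits stay zero
        assert(((Y >> Shift) & 0xFF000000u) == 0); // high 8 bits stay zero
      }
      return 0;
    }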

Differential Revision: https://reviews.llvm.org/D72573

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
    llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
    llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
    llvm/test/CodeGen/AMDGPU/shl.ll
    llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
    llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
    llvm/test/CodeGen/ARM/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
    llvm/test/CodeGen/BPF/shifts.ll
    llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
    llvm/test/CodeGen/X86/avx2-shift.ll
    llvm/test/CodeGen/X86/avx2-vector-shifts.ll
    llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
    llvm/test/CodeGen/X86/vector-fshl-128.ll
    llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
    llvm/test/CodeGen/X86/vector-fshr-128.ll
    llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
    llvm/test/CodeGen/X86/vector-rotate-128.ll
    llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
    llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 1e5e0724f08e..4fa438a2795d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2834,6 +2834,12 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
       Known.One <<= Shift;
       // Low bits are known zero.
       Known.Zero.setLowBits(Shift);
+    } else {
+      // No matter the shift amount, the trailing zeros will stay zero.
+      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+      Known.Zero =
+          APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros());
+      Known.One.clearAllBits();
     }
     break;
   case ISD::SRL:
@@ -2847,6 +2853,11 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     } else if (const APInt *ShMinAmt = getValidMinimumShiftAmountConstant(Op)) {
       // Minimum shift high bits are known zero.
       Known.Zero.setHighBits(ShMinAmt->getZExtValue());
+    } else {
+      // No matter the shift amount, the leading zeros will stay zero.
+      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+      Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros());
+      Known.One.clearAllBits();
     }
     break;
   case ISD::SRA:

diff  --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index fcbe5a615c3b..11f552437a88 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -300,10 +300,8 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; CHECK-LABEL: scalar_i32_x_is_const2_eq:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
-; CHECK-NEXT:    mov w9, #43605
 ; CHECK-NEXT:    lsr w8, w8, w0
-; CHECK-NEXT:    movk w9, #43605, lsl #16
-; CHECK-NEXT:    tst w8, w9
+; CHECK-NEXT:    cmp w8, #0 // =0
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %t0 = lshr i32 1, %y
@@ -322,9 +320,7 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    mov w8, #24
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr w8, w8, w1
-; CHECK-NEXT:    and w8, w8, w0
-; CHECK-NEXT:    sxtb w8, w8
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    tst w8, w0
 ; CHECK-NEXT:    cset w0, lt
 ; CHECK-NEXT:    ret
   %t0 = lshr i8 24, %y
@@ -340,7 +336,6 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr w8, w8, w1
 ; CHECK-NEXT:    and w8, w8, w0
-; CHECK-NEXT:    and w8, w8, #0xff
 ; CHECK-NEXT:    cmp w8, #1 // =1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 90e007cbd775..2a5bfeb3082e 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -274,7 +274,7 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    and w8, w0, #0xff
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr w8, w8, w1
-; CHECK-NEXT:    ubfx w0, w8, #7, #1
+; CHECK-NEXT:    lsr w0, w8, #7
 ; CHECK-NEXT:    ret
   %t0 = shl i8 128, %y
   %t1 = and i8 %t0, %x
@@ -344,7 +344,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsl w8, w8, w1
 ; CHECK-NEXT:    and w8, w8, w0
-; CHECK-NEXT:    and w8, w8, #0xff
+; CHECK-NEXT:    and w8, w8, #0x80
 ; CHECK-NEXT:    cmp w8, #1 // =1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
index 5275a8190807..95e88ebfc307 100644
--- a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,VI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,CI %s
 
 define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
 ; GFX9-LABEL: s_lshr_v2i16:
@@ -24,17 +24,17 @@ define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
 ; VI-NEXT:    s_load_dword s0, s[0:1], 0x30
 ; VI-NEXT:    s_mov_b32 s4, 0xffff
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_and_b32 s1, s5, s4
-; VI-NEXT:    s_and_b32 s4, s0, s4
-; VI-NEXT:    s_lshr_b32 s5, s5, 16
-; VI-NEXT:    s_lshr_b32 s0, s0, 16
-; VI-NEXT:    s_lshr_b32 s0, s5, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    v_bfe_u32 v0, s1, v0, 16
-; VI-NEXT:    s_lshl_b32 s0, s0, 16
-; VI-NEXT:    v_or_b32_e32 v2, s0, v0
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    s_lshr_b32 s1, s5, 16
+; VI-NEXT:    s_lshr_b32 s6, s0, 16
+; VI-NEXT:    s_lshr_b32 s1, s1, s6
+; VI-NEXT:    s_and_b32 s5, s5, s4
+; VI-NEXT:    s_and_b32 s0, s0, s4
+; VI-NEXT:    s_lshr_b32 s0, s5, s0
+; VI-NEXT:    s_lshl_b32 s1, s1, 16
+; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
@@ -49,13 +49,13 @@ define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_lshr_b32 s1, s2, 16
 ; CI-NEXT:    s_lshr_b32 s8, s0, 16
+; CI-NEXT:    s_lshr_b32 s1, s1, s8
+; CI-NEXT:    s_and_b32 s2, s2, s3
 ; CI-NEXT:    s_and_b32 s0, s0, s3
+; CI-NEXT:    s_lshr_b32 s0, s2, s0
+; CI-NEXT:    s_lshl_b32 s1, s1, 16
+; CI-NEXT:    s_or_b32 s0, s0, s1
 ; CI-NEXT:    v_mov_b32_e32 v0, s0
-; CI-NEXT:    s_lshr_b32 s0, s1, s8
-; CI-NEXT:    s_and_b32 s2, s2, s3
-; CI-NEXT:    v_bfe_u32 v0, s2, v0, 16
-; CI-NEXT:    s_lshl_b32 s0, s0, 16
-; CI-NEXT:    v_or_b32_e32 v0, s0, v0
 ; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; CI-NEXT:    s_endpgm
   %result = lshr <2 x i16> %lhs, %rhs
@@ -123,7 +123,7 @@ define amdgpu_kernel void @v_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
 ; CI-NEXT:    v_lshrrev_b32_e32 v5, 16, v3
 ; CI-NEXT:    v_and_b32_e32 v2, s8, v2
 ; CI-NEXT:    v_and_b32_e32 v3, s8, v3
-; CI-NEXT:    v_bfe_u32 v2, v2, v3, 16
+; CI-NEXT:    v_lshrrev_b32_e32 v2, v3, v2
 ; CI-NEXT:    v_lshrrev_b32_e32 v3, v5, v4
 ; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; CI-NEXT:    v_or_b32_e32 v2, v2, v3
@@ -201,7 +201,7 @@ define amdgpu_kernel void @lshr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
 ; CI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
 ; CI-NEXT:    v_and_b32_e32 v2, s10, v2
 ; CI-NEXT:    v_lshrrev_b32_e32 v3, s9, v3
-; CI-NEXT:    v_bfe_u32 v2, v2, s8, 16
+; CI-NEXT:    v_lshrrev_b32_e32 v2, s8, v2
 ; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; CI-NEXT:    v_or_b32_e32 v2, v2, v3
 ; CI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@@ -276,7 +276,7 @@ define amdgpu_kernel void @lshr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
 ; CI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
 ; CI-NEXT:    v_and_b32_e32 v2, s10, v2
 ; CI-NEXT:    v_lshr_b32_e32 v3, s9, v3
-; CI-NEXT:    v_bfe_u32 v2, s8, v2, 16
+; CI-NEXT:    v_lshr_b32_e32 v2, s8, v2
 ; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; CI-NEXT:    v_or_b32_e32 v2, v2, v3
 ; CI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@@ -344,7 +344,7 @@ define amdgpu_kernel void @lshr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i
 ; CI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
 ; CI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
 ; CI-NEXT:    v_lshr_b32_e32 v3, 8, v3
-; CI-NEXT:    v_bfe_u32 v2, 8, v2, 16
+; CI-NEXT:    v_lshr_b32_e32 v2, 8, v2
 ; CI-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; CI-NEXT:    v_or_b32_e32 v2, v2, v3
 ; CI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
@@ -490,9 +490,9 @@ define amdgpu_kernel void @v_lshr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
 ; CI-NEXT:    v_and_b32_e32 v4, s8, v4
 ; CI-NEXT:    v_and_b32_e32 v3, s8, v3
 ; CI-NEXT:    v_and_b32_e32 v5, s8, v5
-; CI-NEXT:    v_bfe_u32 v3, v3, v5, 16
+; CI-NEXT:    v_lshrrev_b32_e32 v3, v5, v3
 ; CI-NEXT:    v_lshrrev_b32_e32 v5, v9, v7
-; CI-NEXT:    v_bfe_u32 v2, v2, v4, 16
+; CI-NEXT:    v_lshrrev_b32_e32 v2, v4, v2
 ; CI-NEXT:    v_lshrrev_b32_e32 v4, v8, v6
 ; CI-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; CI-NEXT:    v_lshlrev_b32_e32 v4, 16, v4

diff  --git a/llvm/test/CodeGen/AMDGPU/shl.ll b/llvm/test/CodeGen/AMDGPU/shl.ll
index 72c822cd8e5d..4adacbc126d8 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.ll
@@ -1184,25 +1184,20 @@ define amdgpu_kernel void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 ad
 ;
 ; EG-LABEL: s_shl_inline_imm_1_i64:
 ; EG:       ; %bb.0:
-; EG-NEXT:    ALU 13, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT:    ALU 8, @4, KC0[CB0:0-32], KC1[]
 ; EG-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; EG-NEXT:    CF_END
 ; EG-NEXT:    PAD
 ; EG-NEXT:    ALU clause starting at 4:
-; EG-NEXT:     SUB_INT * T0.W, literal.x, KC0[2].W,
-; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT:     LSHR T0.W, 1, PV.W,
-; EG-NEXT:     ADD_INT * T1.W, KC0[2].W, literal.x,
-; EG-NEXT:    -32(nan), 0(0.000000e+00)
-; EG-NEXT:     LSHL T0.Z, 1, PS,
-; EG-NEXT:     LSHR T0.W, PV.W, 1,
-; EG-NEXT:     SETGT_UINT * T1.W, KC0[2].W, literal.x,
-; EG-NEXT:    31(4.344025e-44), 0(0.000000e+00)
-; EG-NEXT:     CNDE_INT T0.Y, PS, PV.W, PV.Z,
-; EG-NEXT:     LSHL * T0.W, 1, KC0[2].W,
-; EG-NEXT:     CNDE_INT T0.X, T1.W, PV.W, 0.0,
+; EG-NEXT:     ADD_INT T0.Z, KC0[2].W, literal.x,
+; EG-NEXT:     SETGT_UINT T0.W, KC0[2].W, literal.y,
+; EG-NEXT:     LSHL * T1.W, 1, KC0[2].W,
+; EG-NEXT:    -32(nan), 31(4.344025e-44)
+; EG-NEXT:     CNDE_INT T0.X, PV.W, PS, 0.0,
+; EG-NEXT:     LSHL T1.W, 1, PV.Z,
 ; EG-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; EG-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT:     CNDE_INT * T0.Y, T0.W, 0.0, PV.W,
   %shl = shl i64 1, %a
   store i64 %shl, i64 addrspace(1)* %out, align 8
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
index c6816d351f23..7968aa4c0264 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,VI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI,CI %s
 
 define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
 ; GFX9-LABEL: s_shl_v2i16:
@@ -340,15 +340,14 @@ define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i1
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; CI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
-; CI-NEXT:    s_mov_b32 s4, 0xffff
 ; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; CI-NEXT:    s_waitcnt vmcnt(0)
-; CI-NEXT:    v_and_b32_e32 v3, s4, v2
+; CI-NEXT:    v_and_b32_e32 v3, 0xffff, v2
 ; CI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
 ; CI-NEXT:    v_lshl_b32_e32 v2, 8, v2
 ; CI-NEXT:    v_lshl_b32_e32 v3, 8, v3
 ; CI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT:    v_and_b32_e32 v3, s4, v3
+; CI-NEXT:    v_and_b32_e32 v3, 0xfff8, v3
 ; CI-NEXT:    v_or_b32_e32 v2, v3, v2
 ; CI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
 ; CI-NEXT:    s_endpgm

diff  --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index cc97f2f01559..bf029ee862af 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -967,52 +967,25 @@ define i1 @scalar_i32_x_is_const_eq(i32 %y) nounwind {
   ret i1 %res
 }
 define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
-; ARM6-LABEL: scalar_i32_x_is_const2_eq:
-; ARM6:       @ %bb.0:
-; ARM6-NEXT:    ldr r2, .LCPI19_0
-; ARM6-NEXT:    mov r1, #1
-; ARM6-NEXT:    and r0, r2, r1, lsr r0
-; ARM6-NEXT:    clz r0, r0
-; ARM6-NEXT:    lsr r0, r0, #5
-; ARM6-NEXT:    bx lr
-; ARM6-NEXT:    .p2align 2
-; ARM6-NEXT:  @ %bb.1:
-; ARM6-NEXT:  .LCPI19_0:
-; ARM6-NEXT:    .long 2857740885 @ 0xaa55aa55
-;
-; ARM78-LABEL: scalar_i32_x_is_const2_eq:
-; ARM78:       @ %bb.0:
-; ARM78-NEXT:    movw r1, #43605
-; ARM78-NEXT:    mov r2, #1
-; ARM78-NEXT:    movt r1, #43605
-; ARM78-NEXT:    and r0, r1, r2, lsr r0
-; ARM78-NEXT:    clz r0, r0
-; ARM78-NEXT:    lsr r0, r0, #5
-; ARM78-NEXT:    bx lr
+; ARM-LABEL: scalar_i32_x_is_const2_eq:
+; ARM:       @ %bb.0:
+; ARM-NEXT:    mov r1, #1
+; ARM-NEXT:    eor r0, r1, r1, lsr r0
+; ARM-NEXT:    bx lr
 ;
 ; THUMB6-LABEL: scalar_i32_x_is_const2_eq:
 ; THUMB6:       @ %bb.0:
 ; THUMB6-NEXT:    movs r1, #1
 ; THUMB6-NEXT:    lsrs r1, r0
-; THUMB6-NEXT:    ldr r2, .LCPI19_0
-; THUMB6-NEXT:    ands r2, r1
-; THUMB6-NEXT:    rsbs r0, r2, #0
-; THUMB6-NEXT:    adcs r0, r2
+; THUMB6-NEXT:    rsbs r0, r1, #0
+; THUMB6-NEXT:    adcs r0, r1
 ; THUMB6-NEXT:    bx lr
-; THUMB6-NEXT:    .p2align 2
-; THUMB6-NEXT:  @ %bb.1:
-; THUMB6-NEXT:  .LCPI19_0:
-; THUMB6-NEXT:    .long 2857740885 @ 0xaa55aa55
 ;
 ; THUMB78-LABEL: scalar_i32_x_is_const2_eq:
 ; THUMB78:       @ %bb.0:
 ; THUMB78-NEXT:    movs r1, #1
 ; THUMB78-NEXT:    lsr.w r0, r1, r0
-; THUMB78-NEXT:    movw r1, #43605
-; THUMB78-NEXT:    movt r1, #43605
-; THUMB78-NEXT:    ands r0, r1
-; THUMB78-NEXT:    clz r0, r0
-; THUMB78-NEXT:    lsrs r0, r0, #5
+; THUMB78-NEXT:    eor r0, r0, #1
 ; THUMB78-NEXT:    bx lr
   %t0 = lshr i32 1, %y
   %t1 = and i32 %t0, 2857740885
@@ -1029,8 +1002,7 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 ; ARM6:       @ %bb.0:
 ; ARM6-NEXT:    uxtb r1, r1
 ; ARM6-NEXT:    mov r2, #24
-; ARM6-NEXT:    and r0, r0, r2, lsr r1
-; ARM6-NEXT:    sxtb r1, r0
+; ARM6-NEXT:    and r1, r0, r2, lsr r1
 ; ARM6-NEXT:    mov r0, #0
 ; ARM6-NEXT:    cmp r1, #0
 ; ARM6-NEXT:    movmi r0, #1
@@ -1040,8 +1012,7 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 ; ARM78:       @ %bb.0:
 ; ARM78-NEXT:    uxtb r1, r1
 ; ARM78-NEXT:    mov r2, #24
-; ARM78-NEXT:    and r0, r0, r2, lsr r1
-; ARM78-NEXT:    sxtb r1, r0
+; ARM78-NEXT:    and r1, r0, r2, lsr r1
 ; ARM78-NEXT:    mov r0, #0
 ; ARM78-NEXT:    cmp r1, #0
 ; ARM78-NEXT:    movwmi r0, #1
@@ -1053,8 +1024,6 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 ; THUMB6-NEXT:    movs r2, #24
 ; THUMB6-NEXT:    lsrs r2, r1
 ; THUMB6-NEXT:    ands r2, r0
-; THUMB6-NEXT:    sxtb r0, r2
-; THUMB6-NEXT:    cmp r0, #0
 ; THUMB6-NEXT:    bmi .LBB20_2
 ; THUMB6-NEXT:  @ %bb.1:
 ; THUMB6-NEXT:    movs r0, #0
@@ -1069,9 +1038,7 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 ; THUMB78-NEXT:    movs r2, #24
 ; THUMB78-NEXT:    lsr.w r1, r2, r1
 ; THUMB78-NEXT:    ands r0, r1
-; THUMB78-NEXT:    sxtb r1, r0
-; THUMB78-NEXT:    movs r0, #0
-; THUMB78-NEXT:    cmp r1, #0
+; THUMB78-NEXT:    mov.w r0, #0
 ; THUMB78-NEXT:    it mi
 ; THUMB78-NEXT:    movmi r0, #1
 ; THUMB78-NEXT:    bx lr
@@ -1087,8 +1054,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; ARM-NEXT:    uxtb r1, r1
 ; ARM-NEXT:    mov r2, #128
 ; ARM-NEXT:    and r0, r0, r2, lsr r1
-; ARM-NEXT:    mvn r1, #0
-; ARM-NEXT:    uxtab r0, r1, r0
+; ARM-NEXT:    sub r0, r0, #1
 ; ARM-NEXT:    clz r0, r0
 ; ARM-NEXT:    lsr r0, r0, #5
 ; ARM-NEXT:    bx lr
@@ -1099,8 +1065,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; THUMB6-NEXT:    movs r2, #128
 ; THUMB6-NEXT:    lsrs r2, r1
 ; THUMB6-NEXT:    ands r2, r0
-; THUMB6-NEXT:    uxtb r0, r2
-; THUMB6-NEXT:    subs r1, r0, #1
+; THUMB6-NEXT:    subs r1, r2, #1
 ; THUMB6-NEXT:    rsbs r0, r1, #0
 ; THUMB6-NEXT:    adcs r0, r1
 ; THUMB6-NEXT:    bx lr
@@ -1111,8 +1076,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; THUMB78-NEXT:    movs r2, #128
 ; THUMB78-NEXT:    lsr.w r1, r2, r1
 ; THUMB78-NEXT:    ands r0, r1
-; THUMB78-NEXT:    mov.w r1, #-1
-; THUMB78-NEXT:    uxtab r0, r1, r0
+; THUMB78-NEXT:    subs r0, #1
 ; THUMB78-NEXT:    clz r0, r0
 ; THUMB78-NEXT:    lsrs r0, r0, #5
 ; THUMB78-NEXT:    bx lr

diff  --git a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index b59c8a1d9550..0de18e557222 100644
--- a/llvm/test/CodeGen/ARM/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/ARM/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -24,7 +24,6 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    lsr r0, r0, r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #7
 ; ARM-NEXT:    bx lr
 ;
@@ -45,7 +44,6 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; THUMB7-NEXT:    uxtb r0, r0
 ; THUMB7-NEXT:    lsrs r0, r1
 ; THUMB7-NEXT:    movs r1, #1
-; THUMB7-NEXT:    uxtb r0, r0
 ; THUMB7-NEXT:    eor.w r0, r1, r0, lsr #7
 ; THUMB7-NEXT:    bx lr
 ;
@@ -55,7 +53,6 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
 ; THUMB8-NEXT:    uxtb r1, r1
 ; THUMB8-NEXT:    lsrs r0, r1
 ; THUMB8-NEXT:    movs r1, #1
-; THUMB8-NEXT:    uxtb r0, r0
 ; THUMB8-NEXT:    eor.w r0, r1, r0, lsr #7
 ; THUMB8-NEXT:    bx lr
   %t0 = shl i8 128, %y
@@ -163,7 +160,6 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; ARM-NEXT:    uxth r0, r0
 ; ARM-NEXT:    lsr r0, r0, r1
 ; ARM-NEXT:    mov r1, #1
-; ARM-NEXT:    uxth r0, r0
 ; ARM-NEXT:    eor r0, r1, r0, lsr #15
 ; ARM-NEXT:    bx lr
 ;
@@ -185,7 +181,6 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; THUMB7-NEXT:    uxth r0, r0
 ; THUMB7-NEXT:    lsrs r0, r1
 ; THUMB7-NEXT:    movs r1, #1
-; THUMB7-NEXT:    uxth r0, r0
 ; THUMB7-NEXT:    eor.w r0, r1, r0, lsr #15
 ; THUMB7-NEXT:    bx lr
 ;
@@ -195,7 +190,6 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
 ; THUMB8-NEXT:    uxth r1, r1
 ; THUMB8-NEXT:    lsrs r0, r1
 ; THUMB8-NEXT:    movs r1, #1
-; THUMB8-NEXT:    uxth r0, r0
 ; THUMB8-NEXT:    eor.w r0, r1, r0, lsr #15
 ; THUMB8-NEXT:    bx lr
   %t0 = shl i16 32768, %y
@@ -973,7 +967,6 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; ARM-NEXT:    uxtb r1, r1
 ; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    lsr r0, r0, r1
-; ARM-NEXT:    uxtb r0, r0
 ; ARM-NEXT:    lsr r0, r0, #7
 ; ARM-NEXT:    bx lr
 ;
@@ -982,7 +975,6 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; THUMB6-NEXT:    uxtb r1, r1
 ; THUMB6-NEXT:    uxtb r0, r0
 ; THUMB6-NEXT:    lsrs r0, r1
-; THUMB6-NEXT:    uxtb r0, r0
 ; THUMB6-NEXT:    lsrs r0, r0, #7
 ; THUMB6-NEXT:    bx lr
 ;
@@ -991,7 +983,6 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; THUMB7-NEXT:    uxtb r1, r1
 ; THUMB7-NEXT:    uxtb r0, r0
 ; THUMB7-NEXT:    lsrs r0, r1
-; THUMB7-NEXT:    uxtb r0, r0
 ; THUMB7-NEXT:    lsrs r0, r0, #7
 ; THUMB7-NEXT:    bx lr
 ;
@@ -1000,7 +991,6 @@ define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
 ; THUMB8-NEXT:    uxtb r0, r0
 ; THUMB8-NEXT:    uxtb r1, r1
 ; THUMB8-NEXT:    lsrs r0, r1
-; THUMB8-NEXT:    uxtb r0, r0
 ; THUMB8-NEXT:    lsrs r0, r0, #7
 ; THUMB8-NEXT:    bx lr
   %t0 = shl i8 128, %y
@@ -1178,13 +1168,7 @@ define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
 define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; ARM-LABEL: scalar_i8_signbit_eq_with_nonzero:
 ; ARM:       @ %bb.0:
-; ARM-NEXT:    uxtb r1, r1
-; ARM-NEXT:    mvn r2, #127
-; ARM-NEXT:    and r0, r0, r2, lsl r1
-; ARM-NEXT:    mvn r1, #0
-; ARM-NEXT:    uxtab r0, r1, r0
-; ARM-NEXT:    clz r0, r0
-; ARM-NEXT:    lsr r0, r0, #5
+; ARM-NEXT:    mov r0, #0
 ; ARM-NEXT:    bx lr
 ;
 ; THUMB6-LABEL: scalar_i8_signbit_eq_with_nonzero:
@@ -1202,14 +1186,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ;
 ; THUMB78-LABEL: scalar_i8_signbit_eq_with_nonzero:
 ; THUMB78:       @ %bb.0:
-; THUMB78-NEXT:    uxtb r1, r1
-; THUMB78-NEXT:    mvn r2, #127
-; THUMB78-NEXT:    lsl.w r1, r2, r1
-; THUMB78-NEXT:    ands r0, r1
-; THUMB78-NEXT:    mov.w r1, #-1
-; THUMB78-NEXT:    uxtab r0, r1, r0
-; THUMB78-NEXT:    clz r0, r0
-; THUMB78-NEXT:    lsrs r0, r0, #5
+; THUMB78-NEXT:    movs r0, #0
 ; THUMB78-NEXT:    bx lr
   %t0 = shl i8 128, %y
   %t1 = and i8 %t0, %x

diff  --git a/llvm/test/CodeGen/BPF/shifts.ll b/llvm/test/CodeGen/BPF/shifts.ll
index 2d73ffddf82b..2ffcb8891c30 100644
--- a/llvm/test/CodeGen/BPF/shifts.ll
+++ b/llvm/test/CodeGen/BPF/shifts.ll
@@ -52,7 +52,7 @@ define zeroext i32 @lshr32(i32 zeroext %a, i32 zeroext %cnt) nounwind readnone {
 entry:
 ; CHECK-LABEL: lshr32:
 ; CHECK: r0 >>= r2 # encoding: [0x7f,0x20,0x00,0x00,0x00,0x00,0x00,0x00]
-; CHECK: r0 <<= 32 # encoding: [0x67,0x00,0x00,0x00,0x20,0x00,0x00,0x00]
+; CHECK: exit # encoding: [0x95,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
   %shr = lshr i32 %a, %cnt
   ret i32 %shr
 }

diff  --git a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
index b3efdcca4adc..ed2bfc9fcf60 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -94,68 +94,57 @@ entry:
 define zeroext i8 @lshr_i8(i8 zeroext %a, i8 zeroext %b) {
 ; MIPS2-LABEL: lshr_i8:
 ; MIPS2:       # %bb.0: # %entry
-; MIPS2-NEXT:    srlv $1, $4, $5
 ; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    andi $2, $1, 255
+; MIPS2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32-LABEL: lshr_i8:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    srlv $1, $4, $5
 ; MIPS32-NEXT:    jr $ra
-; MIPS32-NEXT:    andi $2, $1, 255
+; MIPS32-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32R2-LABEL: lshr_i8:
 ; MIPS32R2:       # %bb.0: # %entry
-; MIPS32R2-NEXT:    srlv $1, $4, $5
 ; MIPS32R2-NEXT:    jr $ra
-; MIPS32R2-NEXT:    andi $2, $1, 255
+; MIPS32R2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32R6-LABEL: lshr_i8:
 ; MIPS32R6:       # %bb.0: # %entry
-; MIPS32R6-NEXT:    srlv $1, $4, $5
 ; MIPS32R6-NEXT:    jr $ra
-; MIPS32R6-NEXT:    andi $2, $1, 255
+; MIPS32R6-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS3-LABEL: lshr_i8:
 ; MIPS3:       # %bb.0: # %entry
-; MIPS3-NEXT:    srlv $1, $4, $5
 ; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    andi $2, $1, 255
+; MIPS3-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS4-LABEL: lshr_i8:
 ; MIPS4:       # %bb.0: # %entry
-; MIPS4-NEXT:    srlv $1, $4, $5
 ; MIPS4-NEXT:    jr $ra
-; MIPS4-NEXT:    andi $2, $1, 255
+; MIPS4-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64-LABEL: lshr_i8:
 ; MIPS64:       # %bb.0: # %entry
-; MIPS64-NEXT:    srlv $1, $4, $5
 ; MIPS64-NEXT:    jr $ra
-; MIPS64-NEXT:    andi $2, $1, 255
+; MIPS64-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64R2-LABEL: lshr_i8:
 ; MIPS64R2:       # %bb.0: # %entry
-; MIPS64R2-NEXT:    srlv $1, $4, $5
 ; MIPS64R2-NEXT:    jr $ra
-; MIPS64R2-NEXT:    andi $2, $1, 255
+; MIPS64R2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64R6-LABEL: lshr_i8:
 ; MIPS64R6:       # %bb.0: # %entry
-; MIPS64R6-NEXT:    srlv $1, $4, $5
 ; MIPS64R6-NEXT:    jr $ra
-; MIPS64R6-NEXT:    andi $2, $1, 255
+; MIPS64R6-NEXT:    srlv $2, $4, $5
 ;
 ; MMR3-LABEL: lshr_i8:
 ; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    jr $ra
 ; MMR3-NEXT:    srlv $2, $4, $5
-; MMR3-NEXT:    andi16 $2, $2, 255
-; MMR3-NEXT:    jrc $ra
 ;
 ; MMR6-LABEL: lshr_i8:
 ; MMR6:       # %bb.0: # %entry
 ; MMR6-NEXT:    srlv $2, $4, $5
-; MMR6-NEXT:    andi16 $2, $2, 255
 ; MMR6-NEXT:    jrc $ra
 entry:
 
@@ -166,68 +155,57 @@ entry:
 define zeroext i16 @lshr_i16(i16 zeroext %a, i16 zeroext %b) {
 ; MIPS2-LABEL: lshr_i16:
 ; MIPS2:       # %bb.0: # %entry
-; MIPS2-NEXT:    srlv $1, $4, $5
 ; MIPS2-NEXT:    jr $ra
-; MIPS2-NEXT:    andi $2, $1, 65535
+; MIPS2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32-LABEL: lshr_i16:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    srlv $1, $4, $5
 ; MIPS32-NEXT:    jr $ra
-; MIPS32-NEXT:    andi $2, $1, 65535
+; MIPS32-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32R2-LABEL: lshr_i16:
 ; MIPS32R2:       # %bb.0: # %entry
-; MIPS32R2-NEXT:    srlv $1, $4, $5
 ; MIPS32R2-NEXT:    jr $ra
-; MIPS32R2-NEXT:    andi $2, $1, 65535
+; MIPS32R2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS32R6-LABEL: lshr_i16:
 ; MIPS32R6:       # %bb.0: # %entry
-; MIPS32R6-NEXT:    srlv $1, $4, $5
 ; MIPS32R6-NEXT:    jr $ra
-; MIPS32R6-NEXT:    andi $2, $1, 65535
+; MIPS32R6-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS3-LABEL: lshr_i16:
 ; MIPS3:       # %bb.0: # %entry
-; MIPS3-NEXT:    srlv $1, $4, $5
 ; MIPS3-NEXT:    jr $ra
-; MIPS3-NEXT:    andi $2, $1, 65535
+; MIPS3-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS4-LABEL: lshr_i16:
 ; MIPS4:       # %bb.0: # %entry
-; MIPS4-NEXT:    srlv $1, $4, $5
 ; MIPS4-NEXT:    jr $ra
-; MIPS4-NEXT:    andi $2, $1, 65535
+; MIPS4-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64-LABEL: lshr_i16:
 ; MIPS64:       # %bb.0: # %entry
-; MIPS64-NEXT:    srlv $1, $4, $5
 ; MIPS64-NEXT:    jr $ra
-; MIPS64-NEXT:    andi $2, $1, 65535
+; MIPS64-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64R2-LABEL: lshr_i16:
 ; MIPS64R2:       # %bb.0: # %entry
-; MIPS64R2-NEXT:    srlv $1, $4, $5
 ; MIPS64R2-NEXT:    jr $ra
-; MIPS64R2-NEXT:    andi $2, $1, 65535
+; MIPS64R2-NEXT:    srlv $2, $4, $5
 ;
 ; MIPS64R6-LABEL: lshr_i16:
 ; MIPS64R6:       # %bb.0: # %entry
-; MIPS64R6-NEXT:    srlv $1, $4, $5
 ; MIPS64R6-NEXT:    jr $ra
-; MIPS64R6-NEXT:    andi $2, $1, 65535
+; MIPS64R6-NEXT:    srlv $2, $4, $5
 ;
 ; MMR3-LABEL: lshr_i16:
 ; MMR3:       # %bb.0: # %entry
+; MMR3-NEXT:    jr $ra
 ; MMR3-NEXT:    srlv $2, $4, $5
-; MMR3-NEXT:    andi16 $2, $2, 65535
-; MMR3-NEXT:    jrc $ra
 ;
 ; MMR6-LABEL: lshr_i16:
 ; MMR6:       # %bb.0: # %entry
 ; MMR6-NEXT:    srlv $2, $4, $5
-; MMR6-NEXT:    andi16 $2, $2, 65535
 ; MMR6-NEXT:    jrc $ra
 entry:
 

diff  --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll
index f4924b3e2fb4..1a0993bd23d7 100644
--- a/llvm/test/CodeGen/X86/avx2-shift.ll
+++ b/llvm/test/CodeGen/X86/avx2-shift.ll
@@ -580,9 +580,8 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8  x i16> %rhs) {
 ; X32-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X32-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
@@ -591,9 +590,8 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8  x i16> %rhs) {
 ; X64-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X64-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
   %res = lshr <8 x i16> %lhs, %rhs

diff  --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
index d95f8a014510..905101cd1fc0 100644
--- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -637,9 +637,8 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
 ; X32-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X32-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vzeroupper
 ; X32-NEXT:    retl
 ;
@@ -648,9 +647,8 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
 ; X64-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; X64-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X64-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
   %lshr = lshr <8 x i16> %r, %a

diff  --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 363df5335ef3..6663459f49d5 100644
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -762,7 +762,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
 ; X86-BMI1-NEXT:    movl $1, %eax
 ; X86-BMI1-NEXT:    shrl %cl, %eax
-; X86-BMI1-NEXT:    testl $-1437226411, %eax # imm = 0xAA55AA55
+; X86-BMI1-NEXT:    testb %al, %al
 ; X86-BMI1-NEXT:    sete %al
 ; X86-BMI1-NEXT:    retl
 ;
@@ -771,7 +771,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
 ; X86-BMI2-NEXT:    movl $1, %ecx
 ; X86-BMI2-NEXT:    shrxl %eax, %ecx, %eax
-; X86-BMI2-NEXT:    testl $-1437226411, %eax # imm = 0xAA55AA55
+; X86-BMI2-NEXT:    testb %al, %al
 ; X86-BMI2-NEXT:    sete %al
 ; X86-BMI2-NEXT:    retl
 ;
@@ -781,7 +781,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; X64-BMI1-NEXT:    movl $1, %eax
 ; X64-BMI1-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-BMI1-NEXT:    shrl %cl, %eax
-; X64-BMI1-NEXT:    testl $-1437226411, %eax # imm = 0xAA55AA55
+; X64-BMI1-NEXT:    testb %al, %al
 ; X64-BMI1-NEXT:    sete %al
 ; X64-BMI1-NEXT:    retq
 ;
@@ -789,7 +789,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; X64-BMI2:       # %bb.0:
 ; X64-BMI2-NEXT:    movl $1, %eax
 ; X64-BMI2-NEXT:    shrxl %edi, %eax, %eax
-; X64-BMI2-NEXT:    testl $-1437226411, %eax # imm = 0xAA55AA55
+; X64-BMI2-NEXT:    testb %al, %al
 ; X64-BMI2-NEXT:    sete %al
 ; X64-BMI2-NEXT:    retq
   %t0 = lshr i32 1, %y
@@ -803,24 +803,10 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ;------------------------------------------------------------------------------;
 
 define i1 @negative_scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
-; X86-LABEL: negative_scalar_i8_bitsinmiddle_slt:
-; X86:       # %bb.0:
-; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-NEXT:    movb $24, %al
-; X86-NEXT:    shrb %cl, %al
-; X86-NEXT:    andb {{[0-9]+}}(%esp), %al
-; X86-NEXT:    shrb $7, %al
-; X86-NEXT:    retl
-;
-; X64-LABEL: negative_scalar_i8_bitsinmiddle_slt:
-; X64:       # %bb.0:
-; X64-NEXT:    movl %esi, %ecx
-; X64-NEXT:    movb $24, %al
-; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-NEXT:    shrb %cl, %al
-; X64-NEXT:    andb %dil, %al
-; X64-NEXT:    shrb $7, %al
-; X64-NEXT:    retq
+; CHECK-LABEL: negative_scalar_i8_bitsinmiddle_slt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = lshr i8 24, %y
   %t1 = and i8 %t0, %x
   %res = icmp slt i8 %t1, 0

diff  --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index 12a5f2bc2cc6..5fb1c6fb0883 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -643,20 +643,19 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
 ;
 ; AVX2-LABEL: var_funnnel_v8i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm3, %xmm3
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX2-NEXT:    vpsllvd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsrlvd %ymm5, %ymm1, %ymm1
-; AVX2-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
 ; AVX2-NEXT:    vpor %xmm1, %xmm3, %xmm1
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2

diff  --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
index ce521ad8896d..7ef1ed243a6b 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -338,20 +338,19 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
 ;
 ; AVX2-LABEL: var_funnnel_v8i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index 00f5d73a4def..ee982383c463 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -656,15 +656,14 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX2-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT:    vpsubw %xmm2, %xmm5, %xmm5
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm2, %xmm4, %xmm4
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpsllvd %ymm5, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllvd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3

diff  --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index d88a2a214cae..0439459ac55c 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -361,19 +361,18 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 666325b35f78..35dbb109924f 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -324,20 +324,19 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ;
 ; AVX2-LABEL: var_rotate_v8i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsllvd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
-; AVX2-NEXT:    vpsubw %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX2-NEXT:    vpsubw %xmm1, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 21a011abab0b..a587a43f1bbb 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -279,9 +279,8 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
index f351bfb21962..b540421fb3de 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
@@ -213,9 +213,8 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -387,9 +386,8 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;


        

