[llvm] [WIP][DAG] visitFREEZE - always allow freezing multiple operands (PR #152107)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 5 06:14:52 PDT 2025


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/152107

From f6e048a674581eb4c8338732611d5ec093896762 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 5 Aug 2025 10:31:10 +0100
Subject: [PATCH] [WIP][DAG] visitFREEZE - always allow freezing multiple
 operands

Remove the limited multiple-operand freeze handling: always freeze all operands, and rely on later visitFREEZE calls to merge the frozen/unfrozen versions of each node so we don't fall into infinite loops.

Fixes #149798
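
For reviewers: the heart of the patch is the simplified operand handling in DAGCombiner::visitFREEZE. Instead of collecting "maybe poison" operands, bailing out when more than one is found for an unsupported opcode, and rewriting their uses via ReplaceAllUsesOfValueWith, the node's operands are now all frozen up front and the node is rebuilt on top of them. A minimal sketch of that step, assuming the surrounding legality checks from visitFREEZE still ran; the helper name below is mine, for illustration only:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // Hypothetical helper mirroring the new '+' lines in the hunk below:
  // copy the operands of the node being frozen and wrap each one in
  // ISD::FREEZE. Later visitFREEZE calls then merge frozen/unfrozen
  // copies of shared operands, which is what keeps the combine from
  // looping forever without the old multi-operand bail-out.
  static SmallVector<SDValue> freezeAllOperands(SelectionDAG &DAG, SDValue N0) {
    SmallVector<SDValue> Ops(N0->ops());
    for (SDValue &Op : Ops)
      Op = DAG.getFreeze(Op);
    return Ops;
  }

The caller then recreates N0 from the frozen operand list (see the ShuffleVectorSDNode special case and the remainder of visitFREEZE in the hunk below).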
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   81 +-
 llvm/test/CodeGen/AMDGPU/bf16-conversions.ll  |   16 +-
 llvm/test/CodeGen/AMDGPU/div_i128.ll          |   78 +-
 llvm/test/CodeGen/AMDGPU/freeze.ll            |   28 +-
 llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll    |    3 +-
 llvm/test/CodeGen/AMDGPU/rem_i128.ll          |   78 +-
 llvm/test/CodeGen/AMDGPU/sad.ll               |   18 +-
 llvm/test/CodeGen/AMDGPU/srem.ll              |  192 +-
 .../test/CodeGen/AMDGPU/vector-reduce-smax.ll |   12 +-
 .../test/CodeGen/AMDGPU/vector-reduce-smin.ll |   12 +-
 .../test/CodeGen/AMDGPU/vector-reduce-umax.ll |   12 +-
 .../test/CodeGen/AMDGPU/vector-reduce-umin.ll |   12 +-
 llvm/test/CodeGen/ARM/fpclamptosat_vec.ll     |   10 +-
 llvm/test/CodeGen/NVPTX/i1-select.ll          |   32 +-
 llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll   |  417 ++--
 llvm/test/CodeGen/PowerPC/recipest.ll         |   10 +-
 llvm/test/CodeGen/RISCV/bfloat-convert.ll     |   24 +-
 llvm/test/CodeGen/RISCV/half-convert.ll       |   40 +-
 .../RISCV/intrinsic-cttz-elts-vscale.ll       |   36 +-
 llvm/test/CodeGen/RISCV/rv64-half-convert.ll  |   24 +-
 .../RISCV/wide-scalar-shift-legalization.ll   | 1901 ++++++++---------
 llvm/test/CodeGen/SystemZ/pr60413.ll          |   36 +-
 llvm/test/CodeGen/X86/abds-neg.ll             |   92 +-
 llvm/test/CodeGen/X86/avg.ll                  |  156 +-
 .../test/CodeGen/X86/avx10_2_512bf16-arith.ll |    2 +-
 llvm/test/CodeGen/X86/avx10_2bf16-arith.ll    |    4 +-
 llvm/test/CodeGen/X86/freeze-binary.ll        |   16 +-
 llvm/test/CodeGen/X86/freeze-vector.ll        |   24 +-
 llvm/test/CodeGen/X86/gfni-shifts.ll          |    3 +-
 llvm/test/CodeGen/X86/pr120093.ll             |   10 +-
 llvm/test/CodeGen/X86/pr120906.ll             |   33 +-
 llvm/test/CodeGen/X86/pr62286.ll              |   34 +-
 .../test/CodeGen/X86/setcc-non-simple-type.ll |    4 +-
 llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll   |   40 +-
 llvm/test/CodeGen/X86/sqrt-fastmath.ll        |   20 +-
 llvm/test/CodeGen/X86/sshl_sat_vec.ll         |  136 +-
 llvm/test/CodeGen/X86/ushl_sat_vec.ll         |   49 +-
 llvm/test/CodeGen/X86/vector-compress.ll      |  200 +-
 llvm/test/CodeGen/X86/vector-fshl-256.ll      |   40 +-
 llvm/test/CodeGen/X86/vector-fshl-rot-256.ll  |   20 +-
 llvm/test/CodeGen/X86/vector-fshr-128.ll      |  117 +-
 llvm/test/CodeGen/X86/vector-fshr-256.ll      |   60 +-
 llvm/test/CodeGen/X86/vector-fshr-rot-256.ll  |   18 +-
 llvm/test/CodeGen/X86/vector-rotate-256.ll    |   20 +-
 llvm/test/CodeGen/X86/vector-shift-lut.ll     |  177 +-
 llvm/test/CodeGen/X86/vector-shift-shl-128.ll |   70 +-
 .../CodeGen/X86/vector-shift-shl-sub128.ll    |  210 +-
 llvm/test/CodeGen/X86/vector-trunc-packus.ll  |  170 +-
 llvm/test/CodeGen/X86/vshift-6.ll             |   54 +-
 ...ad-of-small-alloca-with-zero-upper-half.ll |  229 +-
 .../CodeGen/X86/widen-load-of-small-alloca.ll |  229 +-
 51 files changed, 2586 insertions(+), 2723 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d70e96938ed9a..b205034d27cd9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16778,18 +16778,6 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
       N0->getNumValues() != 1 || !N0->hasOneUse())
     return SDValue();
 
-  // TOOD: we should always allow multiple operands, however this increases the
-  // likelihood of infinite loops due to the ReplaceAllUsesOfValueWith call
-  // below causing later nodes that share frozen operands to fold again and no
-  // longer being able to confirm other operands are not poison due to recursion
-  // depth limits on isGuaranteedNotToBeUndefOrPoison.
-  bool AllowMultipleMaybePoisonOperands =
-      N0.getOpcode() == ISD::SELECT_CC || N0.getOpcode() == ISD::SETCC ||
-      N0.getOpcode() == ISD::BUILD_VECTOR ||
-      N0.getOpcode() == ISD::BUILD_PAIR ||
-      N0.getOpcode() == ISD::VECTOR_SHUFFLE ||
-      N0.getOpcode() == ISD::CONCAT_VECTORS || N0.getOpcode() == ISD::FMUL;
-
   // Avoid turning a BUILD_VECTOR that can be recognized as "all zeros", "all
   // ones" or "constant" into something that depends on FrozenUndef. We can
   // instead pick undef values to keep those properties, while at the same time
@@ -16810,74 +16798,13 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
     }
   }
 
-  SmallSet<SDValue, 8> MaybePoisonOperands;
-  SmallVector<unsigned, 8> MaybePoisonOperandNumbers;
-  for (auto [OpNo, Op] : enumerate(N0->ops())) {
-    if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly=*/false))
-      continue;
-    bool HadMaybePoisonOperands = !MaybePoisonOperands.empty();
-    bool IsNewMaybePoisonOperand = MaybePoisonOperands.insert(Op).second;
-    if (IsNewMaybePoisonOperand)
-      MaybePoisonOperandNumbers.push_back(OpNo);
-    if (!HadMaybePoisonOperands)
-      continue;
-    if (IsNewMaybePoisonOperand && !AllowMultipleMaybePoisonOperands) {
-      // Multiple maybe-poison ops when not allowed - bail out.
-      return SDValue();
-    }
-  }
-  // NOTE: the whole op may be not guaranteed to not be undef or poison because
-  // it could create undef or poison due to it's poison-generating flags.
-  // So not finding any maybe-poison operands is fine.
-
-  for (unsigned OpNo : MaybePoisonOperandNumbers) {
-    // N0 can mutate during iteration, so make sure to refetch the maybe poison
-    // operands via the operand numbers. The typical scenario is that we have
-    // something like this
-    //   t262: i32 = freeze t181
-    //   t150: i32 = ctlz_zero_undef t262
-    //   t184: i32 = ctlz_zero_undef t181
-    //   t268: i32 = select_cc t181, Constant:i32<0>, t184, t186, setne:ch
-    // When freezing the t181 operand we get t262 back, and then the
-    // ReplaceAllUsesOfValueWith call will not only replace t181 by t262, but
-    // also recursively replace t184 by t150.
-    SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
-    // Don't replace every single UNDEF everywhere with frozen UNDEF, though.
-    if (MaybePoisonOperand.isUndef())
-      continue;
-    // First, freeze each offending operand.
-    SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
-    // Then, change all other uses of unfrozen operand to use frozen operand.
-    DAG.ReplaceAllUsesOfValueWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
-    if (FrozenMaybePoisonOperand.getOpcode() == ISD::FREEZE &&
-        FrozenMaybePoisonOperand.getOperand(0) == FrozenMaybePoisonOperand) {
-      // But, that also updated the use in the freeze we just created, thus
-      // creating a cycle in a DAG. Let's undo that by mutating the freeze.
-      DAG.UpdateNodeOperands(FrozenMaybePoisonOperand.getNode(),
-                             MaybePoisonOperand);
-    }
-
-    // This node has been merged with another.
-    if (N->getOpcode() == ISD::DELETED_NODE)
-      return SDValue(N, 0);
-  }
-
-  assert(N->getOpcode() != ISD::DELETED_NODE && "Node was deleted!");
-
-  // The whole node may have been updated, so the value we were holding
-  // may no longer be valid. Re-fetch the operand we're `freeze`ing.
-  N0 = N->getOperand(0);
+  // Collect and freeze all operands.
+  SmallVector<SDValue> Ops(N0->ops());
+  for (auto &Op : Ops)
+    Op = DAG.getFreeze(Op);
 
   // Finally, recreate the node, it's operands were updated to use
   // frozen operands, so we just need to use it's "original" operands.
-  SmallVector<SDValue> Ops(N0->ops());
-  // TODO: ISD::UNDEF and ISD::POISON should get separate handling, but best
-  // leave for a future patch.
-  for (SDValue &Op : Ops) {
-    if (Op.isUndef())
-      Op = DAG.getFreeze(Op);
-  }
-
   SDLoc DL(N0);
 
   // Special case handling for ShuffleVectorSDNode nodes.
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index 752a87ac3cb73..749ca6c46ac06 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -188,7 +188,7 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
 ; GFX-950-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v7
 ; GFX-950-NEXT:    v_cndmask_b32_e64 v2, -1, 1, s[2:3]
 ; GFX-950-NEXT:    v_add_u32_e32 v2, v6, v2
-; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
+; GFX-950-NEXT:    s_or_b64 vcc, vcc, s[0:1]
 ; GFX-950-NEXT:    v_cvt_f32_f64_e32 v5, v[0:1]
 ; GFX-950-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
 ; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[2:3], v5
@@ -225,7 +225,7 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX1250-NEXT:    v_add_nc_u32_e32 v0, v9, v0
 ; GFX1250-NEXT:    v_cmp_eq_u32_e64 s2, 1, v11
-; GFX1250-NEXT:    s_or_b32 vcc_lo, s1, vcc_lo
+; GFX1250-NEXT:    s_or_b32 vcc_lo, vcc_lo, s1
 ; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc_lo
 ; GFX1250-NEXT:    s_or_b32 vcc_lo, s2, s0
 ; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v0, v9, vcc_lo
@@ -344,7 +344,7 @@ define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.cvt = fptrunc float %a to bfloat
@@ -380,7 +380,7 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, |v0|, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.abs = call float @llvm.fabs.f32(float %a)
@@ -417,7 +417,7 @@ define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, -v0, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.neg = fneg float %a
@@ -480,7 +480,7 @@ define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
 ; GFX1250-NEXT:    s_or_b32 vcc_lo, vcc_lo, s0
 ; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc_lo
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.cvt = fptrunc double %a to bfloat
@@ -543,7 +543,7 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
 ; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc_lo
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.neg = fneg double %a
@@ -607,7 +607,7 @@ define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
 ; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc_lo
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX1250-NEXT:    flat_store_b16 v[2:3], v0
+; GFX1250-NEXT:    flat_store_b16 v[2:3], v0 scope:SCOPE_SE
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %a.abs = call double @llvm.fabs.f64(double %a)
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 747affa928601..67519caa9a180 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -475,21 +475,28 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v4
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, v6, v9, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v6, 1, v6
@@ -501,6 +508,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v5
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v6, v6, s14
+; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v4, v4, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -1039,10 +1047,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
@@ -2660,31 +2668,40 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v4
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, v6, v9, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v6, 1, v6
 ; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[8:9], v6, 1
 ; GFX9-O0-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT:    s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], -1
+; GFX9-O0-NEXT:    s_mov_b64 s[4:5], s[8:9]
+; GFX9-O0-NEXT:    s_xor_b64 s[4:5], s[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v5
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v6, v6, s14
+; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v4, v4, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -2698,16 +2715,19 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v6
 ; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[6:7], v[4:5], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v4, s11
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v1, v4, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v1, v4, s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s10
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[12:13]
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr12
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v4, s11
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v3, v4, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v3, v4, s[12:13]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v3, s10
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[8:9]
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
@@ -3220,10 +3240,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
diff --git a/llvm/test/CodeGen/AMDGPU/freeze.ll b/llvm/test/CodeGen/AMDGPU/freeze.ll
index 308e86bbaf8fd..b478c920b4a0f 100644
--- a/llvm/test/CodeGen/AMDGPU/freeze.ll
+++ b/llvm/test/CodeGen/AMDGPU/freeze.ll
@@ -13340,7 +13340,10 @@ define void @freeze_v2i1(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX6-SDAG-NEXT:    s_mov_b32 s5, s6
 ; GFX6-SDAG-NEXT:    buffer_load_ubyte v0, v[0:1], s[4:7], 0 addr64
 ; GFX6-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-SDAG-NEXT:    v_and_b32_e32 v0, 3, v0
+; GFX6-SDAG-NEXT:    v_bfe_u32 v1, v0, 1, 1
+; GFX6-SDAG-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX6-SDAG-NEXT:    v_lshlrev_b32_e32 v1, 1, v1
+; GFX6-SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX6-SDAG-NEXT:    buffer_store_byte v0, v[2:3], s[4:7], 0 addr64
 ; GFX6-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; GFX6-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -13372,7 +13375,10 @@ define void @freeze_v2i1(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX7-SDAG-NEXT:    s_mov_b32 s5, s6
 ; GFX7-SDAG-NEXT:    buffer_load_ubyte v0, v[0:1], s[4:7], 0 addr64
 ; GFX7-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX7-SDAG-NEXT:    v_and_b32_e32 v0, 3, v0
+; GFX7-SDAG-NEXT:    v_bfe_u32 v1, v0, 1, 1
+; GFX7-SDAG-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX7-SDAG-NEXT:    v_lshlrev_b32_e32 v1, 1, v1
+; GFX7-SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX7-SDAG-NEXT:    buffer_store_byte v0, v[2:3], s[4:7], 0 addr64
 ; GFX7-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX7-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -13430,7 +13436,11 @@ define void @freeze_v2i1(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX10-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-SDAG-NEXT:    global_load_ubyte v0, v[0:1], off
 ; GFX10-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-SDAG-NEXT:    v_and_b32_e32 v0, 3, v0
+; GFX10-SDAG-NEXT:    v_lshrrev_b16 v1, 1, v0
+; GFX10-SDAG-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX10-SDAG-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX10-SDAG-NEXT:    v_lshlrev_b16 v1, 1, v1
+; GFX10-SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX10-SDAG-NEXT:    global_store_byte v[2:3], v0, off
 ; GFX10-SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -13453,7 +13463,11 @@ define void @freeze_v2i1(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-SDAG-TRUE16-NEXT:    global_load_d16_u8 v0, v[0:1], off
 ; GFX11-SDAG-TRUE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT:    v_and_b16 v0.l, v0.l, 3
+; GFX11-SDAG-TRUE16-NEXT:    v_lshrrev_b16 v0.h, 1, v0.l
+; GFX11-SDAG-TRUE16-NEXT:    v_and_b16 v0.l, v0.l, 1
+; GFX11-SDAG-TRUE16-NEXT:    v_and_b16 v0.h, v0.h, 1
+; GFX11-SDAG-TRUE16-NEXT:    v_lshlrev_b16 v0.h, 1, v0.h
+; GFX11-SDAG-TRUE16-NEXT:    v_or_b16 v0.l, v0.l, v0.h
 ; GFX11-SDAG-TRUE16-NEXT:    global_store_b8 v[2:3], v0, off
 ; GFX11-SDAG-TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -13462,7 +13476,11 @@ define void @freeze_v2i1(ptr addrspace(1) %ptra, ptr addrspace(1) %ptrb) {
 ; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-SDAG-FAKE16-NEXT:    global_load_u8 v0, v[0:1], off
 ; GFX11-SDAG-FAKE16-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT:    v_and_b32_e32 v0, 3, v0
+; GFX11-SDAG-FAKE16-NEXT:    v_lshrrev_b16 v1, 1, v0
+; GFX11-SDAG-FAKE16-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX11-SDAG-FAKE16-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX11-SDAG-FAKE16-NEXT:    v_lshlrev_b16 v1, 1, v1
+; GFX11-SDAG-FAKE16-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX11-SDAG-FAKE16-NEXT:    global_store_b8 v[2:3], v0, off
 ; GFX11-SDAG-FAKE16-NEXT:    s_setpc_b64 s[30:31]
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index af914bd4043cf..355f77acfd302 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -76,12 +76,13 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; SI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_movk_i32 s4, 0xfc01
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s3, 0xfffff
 ; SI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_bfe_u32 v4, v3, 20, 11
-; SI-NEXT:    v_add_i32_e32 v6, vcc, 0xfffffc01, v4
+; SI-NEXT:    v_add_i32_e32 v6, vcc, s4, v4
 ; SI-NEXT:    v_lshr_b64 v[4:5], s[2:3], v6
 ; SI-NEXT:    v_and_b32_e32 v7, 0x80000000, v3
 ; SI-NEXT:    v_not_b32_e32 v5, v5
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
index f0c8fed925673..16375ea88807e 100644
--- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -513,21 +513,28 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v4
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, v6, v9, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v6, 1, v6
@@ -539,6 +546,7 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v5
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v6, v6, s14
+; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v4, v4, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -1077,10 +1085,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
@@ -1893,31 +1901,40 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v8
-; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr7 killed $vgpr7 def $vgpr7_vgpr8 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v6
-; GFX9-O0-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_nop 0
-; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v4
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[7:8], s[8:9]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v9, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[7:8], s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v6, v6, v9, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v6, 1, v6
 ; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[8:9], v6, 1
 ; GFX9-O0-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-O0-NEXT:    s_mov_b64 s[4:5], -1
-; GFX9-O0-NEXT:    s_xor_b64 s[4:5], s[8:9], s[4:5]
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], -1
+; GFX9-O0-NEXT:    s_mov_b64 s[4:5], s[8:9]
+; GFX9-O0-NEXT:    s_xor_b64 s[4:5], s[4:5], s[14:15]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v5
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v6, v6, s14
+; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 killed $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v4, v4, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
@@ -1931,16 +1948,19 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v6
 ; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[6:7], v[4:5], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v4, s11
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v1, v4, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v1, v4, s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s10
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[12:13]
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr12
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v4
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v4, s11
-; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v3, v4, s[8:9]
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v4, v3, v4, s[12:13]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v3, s10
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[8:9]
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
@@ -2453,10 +2473,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 0b58b328bbfb6..b146606f86cc0 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -459,12 +459,13 @@ define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b
 ; GCN-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GCN-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s4, s2, 8
 ; GCN-NEXT:    s_and_b32 s3, s2, 0xff
-; GCN-NEXT:    s_bfe_u32 s4, s2, 0x80008
 ; GCN-NEXT:    s_lshr_b32 s2, s2, 16
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-NEXT:    v_sad_u32 v2, s3, v0, v1
+; GCN-NEXT:    s_and_b32 s4, s4, 0xff
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    v_sad_u32 v2, s3, v1, v0
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    flat_store_byte v[0:1], v2
@@ -524,12 +525,13 @@ define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %
 ; GCN-NEXT:    s_mov_b32 flat_scratch_lo, s13
 ; GCN-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s4, s2, 8
 ; GCN-NEXT:    s_and_b32 s3, s2, 0xff
-; GCN-NEXT:    s_bfe_u32 s4, s2, 0x80008
 ; GCN-NEXT:    s_lshr_b32 s2, s2, 16
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-NEXT:    v_sad_u32 v2, s3, v0, v1
+; GCN-NEXT:    s_and_b32 s4, s4, 0xff
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    v_sad_u32 v2, s3, v1, v0
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    flat_store_byte v[0:1], v2
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index f614f58d8e1dc..01fbfd20ad213 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -853,77 +853,77 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s2
 ; GCN-NEXT:    s_sub_i32 s6, 0, s2
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_readfirstlane_b32 s4, v5
-; GCN-NEXT:    s_ashr_i32 s5, s4, 31
+; GCN-NEXT:    v_readfirstlane_b32 s3, v5
+; GCN-NEXT:    s_ashr_i32 s5, s3, 31
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    s_abs_i32 s4, s4
-; GCN-NEXT:    v_readfirstlane_b32 s3, v2
+; GCN-NEXT:    s_abs_i32 s3, s3
+; GCN-NEXT:    v_readfirstlane_b32 s4, v2
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s7, v1
 ; GCN-NEXT:    s_mul_i32 s6, s6, s7
 ; GCN-NEXT:    s_mul_hi_u32 s6, s7, s6
 ; GCN-NEXT:    s_add_i32 s7, s7, s6
-; GCN-NEXT:    s_mul_hi_u32 s6, s4, s7
+; GCN-NEXT:    s_mul_hi_u32 s6, s3, s7
 ; GCN-NEXT:    s_mul_i32 s6, s6, s2
-; GCN-NEXT:    s_sub_i32 s4, s4, s6
-; GCN-NEXT:    s_sub_i32 s6, s4, s2
-; GCN-NEXT:    s_cmp_ge_u32 s4, s2
-; GCN-NEXT:    s_cselect_b32 s4, s6, s4
-; GCN-NEXT:    s_sub_i32 s6, s4, s2
-; GCN-NEXT:    s_cmp_ge_u32 s4, s2
-; GCN-NEXT:    s_cselect_b32 s2, s6, s4
-; GCN-NEXT:    s_abs_i32 s3, s3
+; GCN-NEXT:    s_sub_i32 s3, s3, s6
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s3, s6, s3
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s2, s6, s3
+; GCN-NEXT:    s_abs_i32 s3, s4
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
 ; GCN-NEXT:    s_xor_b32 s2, s2, s5
 ; GCN-NEXT:    s_sub_i32 s8, 0, s3
 ; GCN-NEXT:    s_sub_i32 s2, s2, s5
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_readfirstlane_b32 s6, v6
-; GCN-NEXT:    s_ashr_i32 s7, s6, 31
-; GCN-NEXT:    s_abs_i32 s6, s6
+; GCN-NEXT:    v_readfirstlane_b32 s4, v6
+; GCN-NEXT:    s_ashr_i32 s7, s4, 31
+; GCN-NEXT:    s_abs_i32 s4, s4
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_readfirstlane_b32 s4, v3
+; GCN-NEXT:    v_readfirstlane_b32 s6, v3
 ; GCN-NEXT:    v_readfirstlane_b32 s5, v1
 ; GCN-NEXT:    s_mul_i32 s8, s8, s5
 ; GCN-NEXT:    s_mul_hi_u32 s8, s5, s8
 ; GCN-NEXT:    s_add_i32 s5, s5, s8
-; GCN-NEXT:    s_mul_hi_u32 s5, s6, s5
+; GCN-NEXT:    s_mul_hi_u32 s5, s4, s5
 ; GCN-NEXT:    s_mul_i32 s5, s5, s3
-; GCN-NEXT:    s_sub_i32 s5, s6, s5
-; GCN-NEXT:    s_sub_i32 s6, s5, s3
-; GCN-NEXT:    s_cmp_ge_u32 s5, s3
-; GCN-NEXT:    s_cselect_b32 s5, s6, s5
-; GCN-NEXT:    s_sub_i32 s6, s5, s3
-; GCN-NEXT:    s_cmp_ge_u32 s5, s3
-; GCN-NEXT:    s_cselect_b32 s3, s6, s5
-; GCN-NEXT:    s_abs_i32 s4, s4
+; GCN-NEXT:    s_sub_i32 s4, s4, s5
+; GCN-NEXT:    s_sub_i32 s5, s4, s3
+; GCN-NEXT:    s_cmp_ge_u32 s4, s3
+; GCN-NEXT:    s_cselect_b32 s4, s5, s4
+; GCN-NEXT:    s_sub_i32 s5, s4, s3
+; GCN-NEXT:    s_cmp_ge_u32 s4, s3
+; GCN-NEXT:    s_cselect_b32 s3, s5, s4
+; GCN-NEXT:    s_abs_i32 s4, s6
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s4
 ; GCN-NEXT:    s_xor_b32 s3, s3, s7
 ; GCN-NEXT:    s_sub_i32 s9, 0, s4
 ; GCN-NEXT:    s_sub_i32 s3, s3, s7
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_readfirstlane_b32 s6, v7
-; GCN-NEXT:    s_ashr_i32 s8, s6, 31
-; GCN-NEXT:    s_abs_i32 s6, s6
+; GCN-NEXT:    v_readfirstlane_b32 s5, v7
+; GCN-NEXT:    s_ashr_i32 s8, s5, 31
+; GCN-NEXT:    s_abs_i32 s5, s5
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_readfirstlane_b32 s5, v4
+; GCN-NEXT:    v_readfirstlane_b32 s6, v4
 ; GCN-NEXT:    v_readfirstlane_b32 s7, v1
 ; GCN-NEXT:    s_mul_i32 s9, s9, s7
 ; GCN-NEXT:    s_mul_hi_u32 s9, s7, s9
 ; GCN-NEXT:    s_add_i32 s7, s7, s9
-; GCN-NEXT:    s_mul_hi_u32 s7, s6, s7
+; GCN-NEXT:    s_mul_hi_u32 s7, s5, s7
 ; GCN-NEXT:    s_mul_i32 s7, s7, s4
-; GCN-NEXT:    s_sub_i32 s6, s6, s7
-; GCN-NEXT:    s_sub_i32 s7, s6, s4
-; GCN-NEXT:    s_cmp_ge_u32 s6, s4
-; GCN-NEXT:    s_cselect_b32 s6, s7, s6
-; GCN-NEXT:    s_sub_i32 s7, s6, s4
-; GCN-NEXT:    s_cmp_ge_u32 s6, s4
-; GCN-NEXT:    s_cselect_b32 s4, s7, s6
-; GCN-NEXT:    s_abs_i32 s5, s5
+; GCN-NEXT:    s_sub_i32 s5, s5, s7
+; GCN-NEXT:    s_sub_i32 s7, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s5, s7, s5
+; GCN-NEXT:    s_sub_i32 s7, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s4, s7, s5
+; GCN-NEXT:    s_abs_i32 s5, s6
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s5
 ; GCN-NEXT:    v_readfirstlane_b32 s6, v8
 ; GCN-NEXT:    v_mov_b32_e32 v1, s2
@@ -997,16 +997,16 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    s_abs_i32 s6, s7
 ; TAHITI-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; TAHITI-NEXT:    s_sub_i32 s7, 0, s6
-; TAHITI-NEXT:    v_readfirstlane_b32 s8, v5
-; TAHITI-NEXT:    s_abs_i32 s9, s8
-; TAHITI-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; TAHITI-NEXT:    s_xor_b32 s0, s0, s1
 ; TAHITI-NEXT:    s_sub_i32 s10, s0, s1
-; TAHITI-NEXT:    s_ashr_i32 s8, s8, 31
+; TAHITI-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; TAHITI-NEXT:    v_readfirstlane_b32 s8, v2
 ; TAHITI-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TAHITI-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; TAHITI-NEXT:    v_mul_lo_u32 v1, s7, v0
-; TAHITI-NEXT:    v_readfirstlane_b32 s7, v2
+; TAHITI-NEXT:    v_readfirstlane_b32 s7, v5
+; TAHITI-NEXT:    s_abs_i32 s9, s7
+; TAHITI-NEXT:    s_ashr_i32 s7, s7, 31
 ; TAHITI-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; TAHITI-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; TAHITI-NEXT:    v_mul_hi_u32 v0, s9, v0
@@ -1019,19 +1019,19 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    s_sub_i32 s1, s0, s6
 ; TAHITI-NEXT:    s_cmp_ge_u32 s0, s6
 ; TAHITI-NEXT:    s_cselect_b32 s0, s1, s0
-; TAHITI-NEXT:    s_abs_i32 s1, s7
+; TAHITI-NEXT:    s_abs_i32 s1, s8
 ; TAHITI-NEXT:    v_cvt_f32_u32_e32 v0, s1
 ; TAHITI-NEXT:    s_sub_i32 s6, 0, s1
-; TAHITI-NEXT:    v_readfirstlane_b32 s7, v6
-; TAHITI-NEXT:    s_abs_i32 s9, s7
+; TAHITI-NEXT:    s_xor_b32 s0, s0, s7
+; TAHITI-NEXT:    s_sub_i32 s7, s0, s7
 ; TAHITI-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; TAHITI-NEXT:    s_xor_b32 s0, s0, s8
-; TAHITI-NEXT:    s_sub_i32 s8, s0, s8
-; TAHITI-NEXT:    s_ashr_i32 s7, s7, 31
+; TAHITI-NEXT:    v_readfirstlane_b32 s8, v3
 ; TAHITI-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TAHITI-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; TAHITI-NEXT:    v_mul_lo_u32 v1, s6, v0
-; TAHITI-NEXT:    v_readfirstlane_b32 s6, v3
+; TAHITI-NEXT:    v_readfirstlane_b32 s6, v6
+; TAHITI-NEXT:    s_abs_i32 s9, s6
+; TAHITI-NEXT:    s_ashr_i32 s6, s6, 31
 ; TAHITI-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; TAHITI-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; TAHITI-NEXT:    v_mul_hi_u32 v0, s9, v0
@@ -1044,9 +1044,9 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    s_sub_i32 s9, s0, s1
 ; TAHITI-NEXT:    s_cmp_ge_u32 s0, s1
 ; TAHITI-NEXT:    s_cselect_b32 s9, s9, s0
-; TAHITI-NEXT:    s_abs_i32 s6, s6
-; TAHITI-NEXT:    v_cvt_f32_u32_e32 v0, s6
-; TAHITI-NEXT:    s_sub_i32 s1, 0, s6
+; TAHITI-NEXT:    s_abs_i32 s8, s8
+; TAHITI-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; TAHITI-NEXT:    s_sub_i32 s1, 0, s8
 ; TAHITI-NEXT:    s_mov_b32 s0, s4
 ; TAHITI-NEXT:    v_readfirstlane_b32 s4, v7
 ; TAHITI-NEXT:    v_rcp_iflag_f32_e32 v0, v0
@@ -1058,23 +1058,23 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    s_abs_i32 s5, s4
 ; TAHITI-NEXT:    s_ashr_i32 s4, s4, 31
 ; TAHITI-NEXT:    v_mul_hi_u32 v3, v2, v1
-; TAHITI-NEXT:    v_mov_b32_e32 v1, s8
-; TAHITI-NEXT:    s_xor_b32 s8, s9, s7
-; TAHITI-NEXT:    s_sub_i32 s7, s8, s7
+; TAHITI-NEXT:    v_mov_b32_e32 v1, s7
+; TAHITI-NEXT:    s_xor_b32 s7, s9, s6
+; TAHITI-NEXT:    s_sub_i32 s6, s7, s6
 ; TAHITI-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; TAHITI-NEXT:    v_mul_hi_u32 v2, s5, v2
-; TAHITI-NEXT:    v_readfirstlane_b32 s8, v2
-; TAHITI-NEXT:    s_mul_i32 s8, s8, s6
-; TAHITI-NEXT:    s_sub_i32 s5, s5, s8
-; TAHITI-NEXT:    s_sub_i32 s8, s5, s6
-; TAHITI-NEXT:    s_cmp_ge_u32 s5, s6
-; TAHITI-NEXT:    s_cselect_b32 s5, s8, s5
-; TAHITI-NEXT:    s_sub_i32 s8, s5, s6
-; TAHITI-NEXT:    s_cmp_ge_u32 s5, s6
-; TAHITI-NEXT:    s_cselect_b32 s5, s8, s5
+; TAHITI-NEXT:    v_readfirstlane_b32 s7, v2
+; TAHITI-NEXT:    s_mul_i32 s7, s7, s8
+; TAHITI-NEXT:    s_sub_i32 s5, s5, s7
+; TAHITI-NEXT:    s_sub_i32 s7, s5, s8
+; TAHITI-NEXT:    s_cmp_ge_u32 s5, s8
+; TAHITI-NEXT:    s_cselect_b32 s5, s7, s5
+; TAHITI-NEXT:    s_sub_i32 s7, s5, s8
+; TAHITI-NEXT:    s_cmp_ge_u32 s5, s8
+; TAHITI-NEXT:    s_cselect_b32 s5, s7, s5
 ; TAHITI-NEXT:    s_xor_b32 s5, s5, s4
 ; TAHITI-NEXT:    s_sub_i32 s4, s5, s4
-; TAHITI-NEXT:    v_mov_b32_e32 v2, s7
+; TAHITI-NEXT:    v_mov_b32_e32 v2, s6
 ; TAHITI-NEXT:    v_mov_b32_e32 v3, s4
 ; TAHITI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; TAHITI-NEXT:    s_endpgm
@@ -1121,18 +1121,18 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    s_abs_i32 s4, s5
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s4
 ; TONGA-NEXT:    s_sub_i32 s5, 0, s4
-; TONGA-NEXT:    v_readfirstlane_b32 s6, v5
-; TONGA-NEXT:    s_abs_i32 s7, s6
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; TONGA-NEXT:    s_xor_b32 s2, s2, s3
 ; TONGA-NEXT:    s_sub_i32 s2, s2, s3
-; TONGA-NEXT:    s_ashr_i32 s6, s6, 31
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT:    v_readfirstlane_b32 s6, v2
 ; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; TONGA-NEXT:    v_mov_b32_e32 v5, s1
 ; TONGA-NEXT:    v_mul_lo_u32 v1, s5, v0
-; TONGA-NEXT:    v_readfirstlane_b32 s5, v2
+; TONGA-NEXT:    v_readfirstlane_b32 s5, v5
+; TONGA-NEXT:    s_abs_i32 s7, s5
+; TONGA-NEXT:    s_ashr_i32 s5, s5, 31
 ; TONGA-NEXT:    v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT:    v_mov_b32_e32 v5, s1
 ; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
 ; TONGA-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; TONGA-NEXT:    v_readfirstlane_b32 s3, v0
@@ -1144,40 +1144,40 @@ define amdgpu_kernel void @srem_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    s_sub_i32 s7, s3, s4
 ; TONGA-NEXT:    s_cmp_ge_u32 s3, s4
 ; TONGA-NEXT:    s_cselect_b32 s3, s7, s3
-; TONGA-NEXT:    s_abs_i32 s4, s5
+; TONGA-NEXT:    s_abs_i32 s4, s6
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; TONGA-NEXT:    s_sub_i32 s5, 0, s4
-; TONGA-NEXT:    v_readfirstlane_b32 s7, v6
-; TONGA-NEXT:    s_abs_i32 s8, s7
+; TONGA-NEXT:    s_sub_i32 s6, 0, s4
+; TONGA-NEXT:    s_xor_b32 s3, s3, s5
+; TONGA-NEXT:    s_sub_i32 s3, s3, s5
 ; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; TONGA-NEXT:    s_xor_b32 s3, s3, s6
-; TONGA-NEXT:    s_sub_i32 s3, s3, s6
-; TONGA-NEXT:    s_ashr_i32 s7, s7, 31
+; TONGA-NEXT:    v_readfirstlane_b32 s7, v3
 ; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; TONGA-NEXT:    v_mul_lo_u32 v1, s5, v0
-; TONGA-NEXT:    v_readfirstlane_b32 s5, v3
+; TONGA-NEXT:    v_mul_lo_u32 v1, s6, v0
+; TONGA-NEXT:    v_readfirstlane_b32 s6, v6
+; TONGA-NEXT:    s_abs_i32 s8, s6
+; TONGA-NEXT:    s_ashr_i32 s6, s6, 31
 ; TONGA-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
 ; TONGA-NEXT:    v_mul_hi_u32 v0, s8, v0
-; TONGA-NEXT:    v_readfirstlane_b32 s6, v0
-; TONGA-NEXT:    s_mul_i32 s6, s6, s4
-; TONGA-NEXT:    s_sub_i32 s6, s8, s6
-; TONGA-NEXT:    s_sub_i32 s8, s6, s4
-; TONGA-NEXT:    s_cmp_ge_u32 s6, s4
-; TONGA-NEXT:    s_cselect_b32 s6, s8, s6
-; TONGA-NEXT:    s_sub_i32 s8, s6, s4
-; TONGA-NEXT:    s_cmp_ge_u32 s6, s4
-; TONGA-NEXT:    s_cselect_b32 s4, s8, s6
-; TONGA-NEXT:    s_abs_i32 s5, s5
+; TONGA-NEXT:    v_readfirstlane_b32 s5, v0
+; TONGA-NEXT:    s_mul_i32 s5, s5, s4
+; TONGA-NEXT:    s_sub_i32 s5, s8, s5
+; TONGA-NEXT:    s_sub_i32 s8, s5, s4
+; TONGA-NEXT:    s_cmp_ge_u32 s5, s4
+; TONGA-NEXT:    s_cselect_b32 s5, s8, s5
+; TONGA-NEXT:    s_sub_i32 s8, s5, s4
+; TONGA-NEXT:    s_cmp_ge_u32 s5, s4
+; TONGA-NEXT:    s_cselect_b32 s4, s8, s5
+; TONGA-NEXT:    s_abs_i32 s5, s7
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s5
 ; TONGA-NEXT:    s_sub_i32 s0, 0, s5
 ; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v2, v0
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s2
-; TONGA-NEXT:    s_xor_b32 s2, s4, s7
-; TONGA-NEXT:    s_sub_i32 s2, s2, s7
+; TONGA-NEXT:    s_xor_b32 s2, s4, s6
+; TONGA-NEXT:    s_sub_i32 s2, s2, s6
 ; TONGA-NEXT:    v_mul_lo_u32 v1, s0, v2
 ; TONGA-NEXT:    v_readfirstlane_b32 s0, v7
 ; TONGA-NEXT:    s_abs_i32 s1, s0
@@ -1819,7 +1819,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; TAHITI-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; TAHITI-NEXT:    v_mul_lo_u32 v1, v1, v2
 ; TAHITI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
-; TAHITI-NEXT:    v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT:    v_sub_i32_e32 v1, vcc, v0, v2
 ; TAHITI-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; TAHITI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; TAHITI-NEXT:    v_sub_i32_e32 v1, vcc, v0, v2
@@ -6232,7 +6232,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mul_hi_u32 v8, v14, v8
 ; TONGA-NEXT:    v_mul_lo_u32 v8, v8, v10
 ; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, v14, v8
-; TONGA-NEXT:    v_subrev_u32_e32 v9, vcc, v10, v8
+; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, v8, v10
 ; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v10
 ; TONGA-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
 ; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, v8, v10
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
index b5d9d00c48045..ed2f06b8136a2 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smax.ll
@@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[2:3], v[4:5]
 ; GFX9-SDAG-NEXT:    s_nop 1
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX9-SDAG-NEXT:    s_nop 1
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX10-SDAG-NEXT:    v_cmp_gt_i64_e64 s4, v[2:3], v[6:7]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX11-SDAG-NEXT:    v_cmp_gt_i64_e64 s0, v[2:3], v[6:7]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
 ; GFX11-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smax_v16i64(<16 x i64> %v) {
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cmp_gt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xfffd
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
index 2a989ecd2ebad..8812cae20f110 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-smin.ll
@@ -3963,8 +3963,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4067,8 +4067,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4175,8 +4175,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[2:3], v[4:5]
 ; GFX9-SDAG-NEXT:    s_nop 1
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc, v[0:1], v[2:3]
 ; GFX9-SDAG-NEXT:    s_nop 1
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4283,8 +4283,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX10-SDAG-NEXT:    v_cmp_lt_i64_e64 s4, v[2:3], v[6:7]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4387,8 +4387,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX11-SDAG-NEXT:    v_cmp_lt_i64_e64 s0, v[2:3], v[6:7]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
 ; GFX11-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -4502,8 +4502,8 @@ define i64 @test_vector_reduce_smin_v16i64(<16 x i64> %v) {
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cmp_lt_i64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xfffd
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
index 69fd58aadfbcc..82eb122f9f703 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umax.ll
@@ -3839,8 +3839,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3943,8 +3943,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -4051,8 +4051,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX9-SDAG-NEXT:    s_nop 1
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX9-SDAG-NEXT:    s_nop 1
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -4159,8 +4159,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX10-SDAG-NEXT:    v_cmp_gt_u64_e64 s4, v[2:3], v[6:7]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4263,8 +4263,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX11-SDAG-NEXT:    v_cmp_gt_u64_e64 s0, v[2:3], v[6:7]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
 ; GFX11-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -4378,8 +4378,8 @@ define i64 @test_vector_reduce_umax_v16i64(<16 x i64> %v) {
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xfffd
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
index aab0e76410ccb..115b05a5ca6a2 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-umin.ll
@@ -3585,8 +3585,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX7-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX7-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3689,8 +3689,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX8-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX8-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
@@ -3797,8 +3797,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX9-SDAG-NEXT:    s_nop 1
-; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GFX9-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX9-SDAG-NEXT:    s_nop 1
 ; GFX9-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
@@ -3905,8 +3905,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX10-SDAG-NEXT:    v_cmp_lt_u64_e64 s4, v[2:3], v[6:7]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s4
+; GFX10-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s4
 ; GFX10-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc_lo
 ; GFX10-SDAG-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc_lo
@@ -4009,8 +4009,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX11-SDAG-NEXT:    v_cmp_lt_u64_e64 s0, v[2:3], v[6:7]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX11-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX11-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
 ; GFX11-SDAG-NEXT:    s_setpc_b64 s[30:31]
@@ -4124,8 +4124,8 @@ define i64 @test_vector_reduce_umin_v16i64(<16 x i64> %v) {
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v4, v0
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xf1ff
 ; GFX12-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v3, v7, v3, s0
+; GFX12-SDAG-NEXT:    v_cndmask_b32_e64 v2, v6, v2, s0
 ; GFX12-SDAG-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
 ; GFX12-SDAG-NEXT:    s_wait_alu 0xfffd
 ; GFX12-SDAG-NEXT:    v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v3, v1
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
index 96f009a4da02d..e77577e025566 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
@@ -141,7 +141,7 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) {
 ; CHECK-NEXT:    vmov.32 d8[0], r0
 ; CHECK-NEXT:    mvn r3, #0
 ; CHECK-NEXT:    subs r4, r4, r3
-; CHECK-NEXT:    vmov.i64 q9, #0xffffffff
+; CHECK-NEXT:    vmov.i64 q8, #0xffffffff
 ; CHECK-NEXT:    vmov.32 d9[1], r5
 ; CHECK-NEXT:    sbcs r5, r5, #0
 ; CHECK-NEXT:    mov r5, #0
@@ -155,10 +155,10 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) {
 ; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    movwlt r0, #1
 ; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    vdup.32 d17, r5
+; CHECK-NEXT:    vdup.32 d19, r5
 ; CHECK-NEXT:    mvnne r0, #0
-; CHECK-NEXT:    vdup.32 d16, r0
-; CHECK-NEXT:    vbsl q8, q4, q9
+; CHECK-NEXT:    vdup.32 d18, r0
+; CHECK-NEXT:    vbit q8, q4, q9
 ; CHECK-NEXT:    vmov r0, r1, d17
 ; CHECK-NEXT:    vmov r3, r5, d16
 ; CHECK-NEXT:    rsbs r0, r0, #0
@@ -428,12 +428,12 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) {
 ; CHECK-NEXT:    vdup.32 d16, r0
 ; CHECK-NEXT:    mov r0, r5
 ; CHECK-NEXT:    vbif q4, q5, q8
+; CHECK-NEXT:    vmov r7, r10, d8
 ; CHECK-NEXT:    bl __aeabi_f2lz
 ; CHECK-NEXT:    mov r5, r0
 ; CHECK-NEXT:    vmov.32 d13[0], r0
 ; CHECK-NEXT:    mov r0, r8
 ; CHECK-NEXT:    mov r6, r1
-; CHECK-NEXT:    vmov r7, r10, d8
 ; CHECK-NEXT:    bl __aeabi_f2lz
 ; CHECK-NEXT:    subs r2, r5, r9
 ; CHECK-NEXT:    vmov.32 d12[0], r0
diff --git a/llvm/test/CodeGen/NVPTX/i1-select.ll b/llvm/test/CodeGen/NVPTX/i1-select.ll
index df32e2a4cfad2..6f8da4f90915c 100644
--- a/llvm/test/CodeGen/NVPTX/i1-select.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-select.ll
@@ -94,27 +94,27 @@ define i32 @test_select_i1_basic(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %fals
 define i32 @test_select_i1_basic_folding(i32 %v1, i32 %v2, i32 %v3, i32 %true, i32 %false) {
 ; CHECK-LABEL: test_select_i1_basic_folding(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .pred %p<13>;
-; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .pred %p<12>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_select_i1_basic_folding_param_0];
 ; CHECK-NEXT:    setp.eq.b32 %p1, %r1, 0;
-; CHECK-NEXT:    ld.param.b32 %r2, [test_select_i1_basic_folding_param_1];
-; CHECK-NEXT:    setp.ne.b32 %p2, %r2, 0;
-; CHECK-NEXT:    setp.eq.b32 %p3, %r2, 0;
-; CHECK-NEXT:    ld.param.b32 %r3, [test_select_i1_basic_folding_param_2];
-; CHECK-NEXT:    setp.eq.b32 %p4, %r3, 0;
-; CHECK-NEXT:    ld.param.b32 %r4, [test_select_i1_basic_folding_param_3];
+; CHECK-NEXT:    ld.param.b32 %r3, [test_select_i1_basic_folding_param_1];
+; CHECK-NEXT:    setp.ne.b32 %p2, %r3, 0;
+; CHECK-NEXT:    setp.eq.b32 %p3, %r3, 0;
+; CHECK-NEXT:    ld.param.b32 %r4, [test_select_i1_basic_folding_param_2];
+; CHECK-NEXT:    setp.eq.b32 %p5, %r4, 0;
+; CHECK-NEXT:    ld.param.b32 %r6, [test_select_i1_basic_folding_param_3];
 ; CHECK-NEXT:    xor.pred %p6, %p1, %p3;
-; CHECK-NEXT:    ld.param.b32 %r5, [test_select_i1_basic_folding_param_4];
-; CHECK-NEXT:    and.pred %p8, %p6, %p4;
-; CHECK-NEXT:    and.pred %p9, %p2, %p4;
-; CHECK-NEXT:    and.pred %p10, %p3, %p8;
-; CHECK-NEXT:    or.pred %p11, %p10, %p9;
-; CHECK-NEXT:    xor.pred %p12, %p11, %p3;
-; CHECK-NEXT:    selp.b32 %r6, %r4, %r5, %p12;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r6;
+; CHECK-NEXT:    ld.param.b32 %r7, [test_select_i1_basic_folding_param_4];
+; CHECK-NEXT:    and.pred %p7, %p6, %p5;
+; CHECK-NEXT:    and.pred %p8, %p2, %p5;
+; CHECK-NEXT:    and.pred %p9, %p3, %p7;
+; CHECK-NEXT:    or.pred %p10, %p9, %p8;
+; CHECK-NEXT:    xor.pred %p11, %p10, %p3;
+; CHECK-NEXT:    selp.b32 %r8, %r6, %r7, %p11;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r8;
 ; CHECK-NEXT:    ret;
   %b1 = icmp eq i32 %v1, 0
   %b2 = icmp eq i32 %v2, 0
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index b540948b20f75..821cfd00dcd07 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -764,13 +764,8 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ;
 ; CHECK-PWR7-LABEL: sub_absv_8_ext:
 ; CHECK-PWR7:       # %bb.0: # %entry
-; CHECK-PWR7-NEXT:    stdu r1, -512(r1)
-; CHECK-PWR7-NEXT:    .cfi_def_cfa_offset 512
-; CHECK-PWR7-NEXT:    .cfi_offset r14, -144
-; CHECK-PWR7-NEXT:    .cfi_offset r15, -136
-; CHECK-PWR7-NEXT:    .cfi_offset r16, -128
-; CHECK-PWR7-NEXT:    .cfi_offset r17, -120
-; CHECK-PWR7-NEXT:    .cfi_offset r18, -112
+; CHECK-PWR7-NEXT:    stdu r1, -448(r1)
+; CHECK-PWR7-NEXT:    .cfi_def_cfa_offset 448
 ; CHECK-PWR7-NEXT:    .cfi_offset r19, -104
 ; CHECK-PWR7-NEXT:    .cfi_offset r20, -96
 ; CHECK-PWR7-NEXT:    .cfi_offset r21, -88
@@ -783,244 +778,258 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ; CHECK-PWR7-NEXT:    .cfi_offset r28, -32
 ; CHECK-PWR7-NEXT:    .cfi_offset r29, -24
 ; CHECK-PWR7-NEXT:    .cfi_offset r30, -16
-; CHECK-PWR7-NEXT:    .cfi_offset r31, -8
-; CHECK-PWR7-NEXT:    .cfi_offset r2, -152
-; CHECK-PWR7-NEXT:    addi r3, r1, 320
-; CHECK-PWR7-NEXT:    std r14, 368(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r15, 376(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r16, 384(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r17, 392(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r18, 400(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r19, 408(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r20, 416(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r21, 424(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r22, 432(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r23, 440(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r24, 448(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r25, 456(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r26, 464(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r27, 472(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r28, 480(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r29, 488(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r30, 496(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r31, 504(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r2, 360(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    addi r3, r1, 304
+; CHECK-PWR7-NEXT:    std r19, 344(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r20, 352(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r21, 360(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r22, 368(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r23, 376(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r24, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r25, 392(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r26, 400(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r27, 408(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r28, 416(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r29, 424(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r30, 432(r1) # 8-byte Folded Spill
 ; CHECK-PWR7-NEXT:    stxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT:    lbz r3, 320(r1)
-; CHECK-PWR7-NEXT:    addi r4, r1, 336
-; CHECK-PWR7-NEXT:    stw r3, 60(r1) # 4-byte Folded Spill
-; CHECK-PWR7-NEXT:    stxvw4x v3, 0, r4
-; CHECK-PWR7-NEXT:    lbz r15, 334(r1)
-; CHECK-PWR7-NEXT:    lbz r14, 350(r1)
-; CHECK-PWR7-NEXT:    lbz r31, 335(r1)
-; CHECK-PWR7-NEXT:    lbz r2, 351(r1)
-; CHECK-PWR7-NEXT:    sub r15, r15, r14
-; CHECK-PWR7-NEXT:    sub r14, r31, r2
-; CHECK-PWR7-NEXT:    srawi r2, r14, 31
-; CHECK-PWR7-NEXT:    xor r14, r14, r2
-; CHECK-PWR7-NEXT:    lbz r3, 333(r1)
-; CHECK-PWR7-NEXT:    lbz r19, 331(r1)
-; CHECK-PWR7-NEXT:    lbz r18, 347(r1)
-; CHECK-PWR7-NEXT:    sub r19, r19, r18
-; CHECK-PWR7-NEXT:    lbz r17, 332(r1)
-; CHECK-PWR7-NEXT:    lbz r16, 348(r1)
-; CHECK-PWR7-NEXT:    sub r17, r17, r16
-; CHECK-PWR7-NEXT:    lbz r23, 329(r1)
-; CHECK-PWR7-NEXT:    sub r14, r14, r2
-; CHECK-PWR7-NEXT:    lbz r2, 349(r1)
-; CHECK-PWR7-NEXT:    lbz r22, 345(r1)
-; CHECK-PWR7-NEXT:    lbz r4, 336(r1)
-; CHECK-PWR7-NEXT:    lbz r5, 321(r1)
-; CHECK-PWR7-NEXT:    lbz r6, 337(r1)
-; CHECK-PWR7-NEXT:    lbz r7, 322(r1)
-; CHECK-PWR7-NEXT:    lbz r8, 338(r1)
-; CHECK-PWR7-NEXT:    lbz r9, 323(r1)
-; CHECK-PWR7-NEXT:    lbz r10, 339(r1)
-; CHECK-PWR7-NEXT:    lbz r11, 324(r1)
-; CHECK-PWR7-NEXT:    lbz r12, 340(r1)
-; CHECK-PWR7-NEXT:    lbz r0, 325(r1)
-; CHECK-PWR7-NEXT:    lbz r30, 341(r1)
-; CHECK-PWR7-NEXT:    lbz r29, 326(r1)
-; CHECK-PWR7-NEXT:    lbz r28, 342(r1)
-; CHECK-PWR7-NEXT:    lbz r27, 327(r1)
-; CHECK-PWR7-NEXT:    lbz r26, 343(r1)
-; CHECK-PWR7-NEXT:    sub r3, r3, r2
-; CHECK-PWR7-NEXT:    lbz r25, 328(r1)
-; CHECK-PWR7-NEXT:    lbz r24, 344(r1)
-; CHECK-PWR7-NEXT:    lbz r21, 330(r1)
-; CHECK-PWR7-NEXT:    lbz r20, 346(r1)
+; CHECK-PWR7-NEXT:    addi r3, r1, 320
+; CHECK-PWR7-NEXT:    lbz r7, 304(r1)
+; CHECK-PWR7-NEXT:    stxvw4x v3, 0, r3
+; CHECK-PWR7-NEXT:    lbz r8, 320(r1)
+; CHECK-PWR7-NEXT:    lbz r9, 305(r1)
+; CHECK-PWR7-NEXT:    lbz r10, 321(r1)
+; CHECK-PWR7-NEXT:    lbz r26, 325(r1)
+; CHECK-PWR7-NEXT:    clrlwi r7, r7, 24
+; CHECK-PWR7-NEXT:    clrlwi r8, r8, 24
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    clrlwi r10, r10, 24
+; CHECK-PWR7-NEXT:    lbz r11, 306(r1)
+; CHECK-PWR7-NEXT:    lbz r12, 322(r1)
+; CHECK-PWR7-NEXT:    lbz r23, 314(r1)
+; CHECK-PWR7-NEXT:    clrlwi r22, r26, 24
+; CHECK-PWR7-NEXT:    lbz r26, 330(r1)
+; CHECK-PWR7-NEXT:    sub r8, r7, r8
+; CHECK-PWR7-NEXT:    lbz r7, 315(r1)
+; CHECK-PWR7-NEXT:    sub r20, r9, r10
+; CHECK-PWR7-NEXT:    lbz r9, 331(r1)
+; CHECK-PWR7-NEXT:    lbz r0, 307(r1)
+; CHECK-PWR7-NEXT:    lbz r30, 323(r1)
+; CHECK-PWR7-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR7-NEXT:    clrlwi r12, r12, 24
+; CHECK-PWR7-NEXT:    clrlwi r23, r23, 24
+; CHECK-PWR7-NEXT:    clrlwi r21, r26, 24
+; CHECK-PWR7-NEXT:    clrlwi r7, r7, 24
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    clrlwi r0, r0, 24
+; CHECK-PWR7-NEXT:    clrlwi r30, r30, 24
+; CHECK-PWR7-NEXT:    lbz r29, 308(r1)
+; CHECK-PWR7-NEXT:    lbz r28, 324(r1)
+; CHECK-PWR7-NEXT:    lbz r27, 309(r1)
+; CHECK-PWR7-NEXT:    lbz r25, 310(r1)
+; CHECK-PWR7-NEXT:    lbz r24, 326(r1)
+; CHECK-PWR7-NEXT:    sub r19, r11, r12
+; CHECK-PWR7-NEXT:    sub r11, r23, r21
+; CHECK-PWR7-NEXT:    sub r9, r7, r9
+; CHECK-PWR7-NEXT:    sub r26, r0, r30
+; CHECK-PWR7-NEXT:    srawi r12, r11, 31
+; CHECK-PWR7-NEXT:    srawi r0, r9, 31
+; CHECK-PWR7-NEXT:    lbz r3, 312(r1)
+; CHECK-PWR7-NEXT:    clrlwi r29, r29, 24
+; CHECK-PWR7-NEXT:    clrlwi r28, r28, 24
+; CHECK-PWR7-NEXT:    clrlwi r27, r27, 24
+; CHECK-PWR7-NEXT:    clrlwi r25, r25, 24
+; CHECK-PWR7-NEXT:    clrlwi r24, r24, 24
+; CHECK-PWR7-NEXT:    xor r11, r11, r12
+; CHECK-PWR7-NEXT:    xor r9, r9, r0
+; CHECK-PWR7-NEXT:    sub r28, r29, r28
+; CHECK-PWR7-NEXT:    sub r30, r27, r22
+; CHECK-PWR7-NEXT:    sub r29, r25, r24
+; CHECK-PWR7-NEXT:    sub r27, r11, r12
+; CHECK-PWR7-NEXT:    sub r24, r9, r0
+; CHECK-PWR7-NEXT:    lbz r9, 316(r1)
+; CHECK-PWR7-NEXT:    lbz r11, 332(r1)
+; CHECK-PWR7-NEXT:    lbz r4, 328(r1)
+; CHECK-PWR7-NEXT:    lbz r5, 311(r1)
+; CHECK-PWR7-NEXT:    lbz r6, 327(r1)
+; CHECK-PWR7-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR7-NEXT:    clrlwi r3, r3, 24
+; CHECK-PWR7-NEXT:    clrlwi r4, r4, 24
+; CHECK-PWR7-NEXT:    clrlwi r5, r5, 24
+; CHECK-PWR7-NEXT:    clrlwi r6, r6, 24
+; CHECK-PWR7-NEXT:    sub r3, r3, r4
 ; CHECK-PWR7-NEXT:    sub r5, r5, r6
-; CHECK-PWR7-NEXT:    srawi r18, r3, 31
-; CHECK-PWR7-NEXT:    sub r7, r7, r8
-; CHECK-PWR7-NEXT:    sub r9, r9, r10
-; CHECK-PWR7-NEXT:    sub r11, r11, r12
-; CHECK-PWR7-NEXT:    sub r0, r0, r30
-; CHECK-PWR7-NEXT:    sub r29, r29, r28
-; CHECK-PWR7-NEXT:    sub r27, r27, r26
-; CHECK-PWR7-NEXT:    sub r25, r25, r24
-; CHECK-PWR7-NEXT:    srawi r31, r15, 31
-; CHECK-PWR7-NEXT:    ld r2, 360(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    xor r3, r3, r18
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    srawi r4, r3, 31
 ; CHECK-PWR7-NEXT:    srawi r6, r5, 31
-; CHECK-PWR7-NEXT:    srawi r8, r7, 31
-; CHECK-PWR7-NEXT:    srawi r10, r9, 31
-; CHECK-PWR7-NEXT:    srawi r12, r11, 31
-; CHECK-PWR7-NEXT:    srawi r30, r0, 31
-; CHECK-PWR7-NEXT:    sub r3, r3, r18
-; CHECK-PWR7-NEXT:    srawi r18, r19, 31
-; CHECK-PWR7-NEXT:    srawi r28, r29, 31
-; CHECK-PWR7-NEXT:    ld r16, 384(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    sldi r3, r3, 56
-; CHECK-PWR7-NEXT:    srawi r26, r27, 31
-; CHECK-PWR7-NEXT:    srawi r24, r25, 31
-; CHECK-PWR7-NEXT:    xor r19, r19, r18
-; CHECK-PWR7-NEXT:    xor r15, r15, r31
+; CHECK-PWR7-NEXT:    xor r3, r3, r4
+; CHECK-PWR7-NEXT:    sldi r27, r27, 56
 ; CHECK-PWR7-NEXT:    xor r5, r5, r6
-; CHECK-PWR7-NEXT:    std r3, 272(r1)
-; CHECK-PWR7-NEXT:    std r3, 280(r1)
-; CHECK-PWR7-NEXT:    srawi r3, r17, 31
-; CHECK-PWR7-NEXT:    sub r19, r19, r18
-; CHECK-PWR7-NEXT:    xor r7, r7, r8
-; CHECK-PWR7-NEXT:    sub r15, r15, r31
-; CHECK-PWR7-NEXT:    xor r17, r17, r3
-; CHECK-PWR7-NEXT:    xor r9, r9, r10
-; CHECK-PWR7-NEXT:    xor r11, r11, r12
-; CHECK-PWR7-NEXT:    xor r0, r0, r30
-; CHECK-PWR7-NEXT:    xor r29, r29, r28
-; CHECK-PWR7-NEXT:    xor r27, r27, r26
-; CHECK-PWR7-NEXT:    sub r3, r17, r3
-; CHECK-PWR7-NEXT:    xor r25, r25, r24
-; CHECK-PWR7-NEXT:    sub r25, r25, r24
-; CHECK-PWR7-NEXT:    sub r27, r27, r26
-; CHECK-PWR7-NEXT:    sub r29, r29, r28
+; CHECK-PWR7-NEXT:    sub r9, r9, r11
+; CHECK-PWR7-NEXT:    sub r3, r3, r4
+; CHECK-PWR7-NEXT:    sldi r24, r24, 56
 ; CHECK-PWR7-NEXT:    sldi r3, r3, 56
-; CHECK-PWR7-NEXT:    sub r0, r0, r30
-; CHECK-PWR7-NEXT:    sub r11, r11, r12
-; CHECK-PWR7-NEXT:    sub r9, r9, r10
-; CHECK-PWR7-NEXT:    sub r7, r7, r8
-; CHECK-PWR7-NEXT:    sub r5, r5, r6
-; CHECK-PWR7-NEXT:    sldi r14, r14, 56
-; CHECK-PWR7-NEXT:    sldi r15, r15, 56
-; CHECK-PWR7-NEXT:    ld r31, 504(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r3, 256(r1)
-; CHECK-PWR7-NEXT:    std r3, 264(r1)
-; CHECK-PWR7-NEXT:    sldi r3, r19, 56
+; CHECK-PWR7-NEXT:    srawi r11, r9, 31
+; CHECK-PWR7-NEXT:    std r27, 208(r1)
+; CHECK-PWR7-NEXT:    sub r4, r5, r6
+; CHECK-PWR7-NEXT:    std r27, 216(r1)
+; CHECK-PWR7-NEXT:    srawi r27, r29, 31
+; CHECK-PWR7-NEXT:    lbz r10, 313(r1)
+; CHECK-PWR7-NEXT:    xor r9, r9, r11
+; CHECK-PWR7-NEXT:    std r24, 224(r1)
+; CHECK-PWR7-NEXT:    lbz r22, 329(r1)
+; CHECK-PWR7-NEXT:    std r24, 232(r1)
+; CHECK-PWR7-NEXT:    srawi r24, r30, 31
+; CHECK-PWR7-NEXT:    ld r21, 360(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sub r23, r9, r11
+; CHECK-PWR7-NEXT:    lbz r9, 317(r1)
+; CHECK-PWR7-NEXT:    lbz r11, 333(r1)
+; CHECK-PWR7-NEXT:    xor r29, r29, r27
+; CHECK-PWR7-NEXT:    std r3, 176(r1)
+; CHECK-PWR7-NEXT:    std r3, 184(r1)
+; CHECK-PWR7-NEXT:    sldi r3, r4, 56
+; CHECK-PWR7-NEXT:    sldi r23, r23, 56
+; CHECK-PWR7-NEXT:    xor r30, r30, r24
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR7-NEXT:    sub r4, r30, r24
+; CHECK-PWR7-NEXT:    ld r30, 432(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    std r3, 160(r1)
+; CHECK-PWR7-NEXT:    std r3, 168(r1)
+; CHECK-PWR7-NEXT:    sub r9, r9, r11
+; CHECK-PWR7-NEXT:    sub r3, r29, r27
+; CHECK-PWR7-NEXT:    std r23, 240(r1)
+; CHECK-PWR7-NEXT:    ld r29, 424(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    srawi r11, r9, 31
+; CHECK-PWR7-NEXT:    std r23, 248(r1)
+; CHECK-PWR7-NEXT:    ld r27, 408(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    srawi r23, r28, 31
+; CHECK-PWR7-NEXT:    sldi r3, r3, 56
+; CHECK-PWR7-NEXT:    xor r28, r28, r23
+; CHECK-PWR7-NEXT:    xor r9, r9, r11
+; CHECK-PWR7-NEXT:    std r3, 144(r1)
+; CHECK-PWR7-NEXT:    ld r24, 384(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    std r3, 152(r1)
+; CHECK-PWR7-NEXT:    sldi r3, r4, 56
+; CHECK-PWR7-NEXT:    sub r25, r9, r11
+; CHECK-PWR7-NEXT:    lbz r9, 318(r1)
+; CHECK-PWR7-NEXT:    lbz r11, 334(r1)
+; CHECK-PWR7-NEXT:    std r3, 128(r1)
 ; CHECK-PWR7-NEXT:    sldi r25, r25, 56
-; CHECK-PWR7-NEXT:    sldi r27, r27, 56
-; CHECK-PWR7-NEXT:    std r3, 240(r1)
-; CHECK-PWR7-NEXT:    std r3, 248(r1)
-; CHECK-PWR7-NEXT:    sub r3, r23, r22
-; CHECK-PWR7-NEXT:    srawi r23, r3, 31
-; CHECK-PWR7-NEXT:    sub r22, r21, r20
-; CHECK-PWR7-NEXT:    srawi r21, r22, 31
-; CHECK-PWR7-NEXT:    sldi r29, r29, 56
-; CHECK-PWR7-NEXT:    sldi r0, r0, 56
-; CHECK-PWR7-NEXT:    sldi r11, r11, 56
-; CHECK-PWR7-NEXT:    xor r3, r3, r23
-; CHECK-PWR7-NEXT:    xor r22, r22, r21
-; CHECK-PWR7-NEXT:    sldi r9, r9, 56
-; CHECK-PWR7-NEXT:    sldi r7, r7, 56
-; CHECK-PWR7-NEXT:    sldi r5, r5, 56
-; CHECK-PWR7-NEXT:    ld r30, 496(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    ld r28, 480(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    sub r3, r3, r23
-; CHECK-PWR7-NEXT:    sub r22, r22, r21
-; CHECK-PWR7-NEXT:    std r14, 304(r1)
-; CHECK-PWR7-NEXT:    ld r26, 464(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    std r3, 136(r1)
+; CHECK-PWR7-NEXT:    sub r3, r28, r23
 ; CHECK-PWR7-NEXT:    sldi r3, r3, 56
-; CHECK-PWR7-NEXT:    sldi r22, r22, 56
-; CHECK-PWR7-NEXT:    ld r24, 448(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    ld r23, 440(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r14, 312(r1)
-; CHECK-PWR7-NEXT:    std r15, 288(r1)
-; CHECK-PWR7-NEXT:    std r3, 208(r1)
-; CHECK-PWR7-NEXT:    std r3, 216(r1)
-; CHECK-PWR7-NEXT:    lwz r3, 60(r1) # 4-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r15, 296(r1)
-; CHECK-PWR7-NEXT:    ld r21, 424(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    ld r20, 416(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r22, 224(r1)
-; CHECK-PWR7-NEXT:    std r22, 232(r1)
-; CHECK-PWR7-NEXT:    sub r4, r3, r4
-; CHECK-PWR7-NEXT:    std r25, 192(r1)
-; CHECK-PWR7-NEXT:    ld r22, 432(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    ld r19, 408(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r3, r4, 31
-; CHECK-PWR7-NEXT:    std r25, 200(r1)
-; CHECK-PWR7-NEXT:    ld r25, 456(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r27, 176(r1)
-; CHECK-PWR7-NEXT:    std r27, 184(r1)
-; CHECK-PWR7-NEXT:    xor r4, r4, r3
-; CHECK-PWR7-NEXT:    std r29, 160(r1)
-; CHECK-PWR7-NEXT:    ld r27, 472(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r29, 168(r1)
-; CHECK-PWR7-NEXT:    std r0, 144(r1)
-; CHECK-PWR7-NEXT:    sub r3, r4, r3
-; CHECK-PWR7-NEXT:    std r0, 152(r1)
-; CHECK-PWR7-NEXT:    ld r29, 488(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    ld r18, 400(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    std r3, 112(r1)
+; CHECK-PWR7-NEXT:    ld r28, 416(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR7-NEXT:    clrlwi r10, r10, 24
+; CHECK-PWR7-NEXT:    std r25, 256(r1)
+; CHECK-PWR7-NEXT:    std r25, 264(r1)
+; CHECK-PWR7-NEXT:    sub r9, r9, r11
+; CHECK-PWR7-NEXT:    srawi r25, r26, 31
+; CHECK-PWR7-NEXT:    xor r26, r26, r25
+; CHECK-PWR7-NEXT:    ld r23, 376(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    srawi r11, r9, 31
+; CHECK-PWR7-NEXT:    std r3, 120(r1)
+; CHECK-PWR7-NEXT:    sub r4, r26, r25
+; CHECK-PWR7-NEXT:    clrlwi r22, r22, 24
+; CHECK-PWR7-NEXT:    srawi r7, r8, 31
+; CHECK-PWR7-NEXT:    sub r10, r10, r22
+; CHECK-PWR7-NEXT:    ld r26, 400(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    xor r9, r9, r11
+; CHECK-PWR7-NEXT:    sldi r3, r4, 56
+; CHECK-PWR7-NEXT:    srawi r22, r10, 31
+; CHECK-PWR7-NEXT:    xor r8, r8, r7
+; CHECK-PWR7-NEXT:    xor r10, r10, r22
+; CHECK-PWR7-NEXT:    sub r10, r10, r22
+; CHECK-PWR7-NEXT:    ld r25, 392(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sub r12, r9, r11
+; CHECK-PWR7-NEXT:    lbz r9, 319(r1)
+; CHECK-PWR7-NEXT:    lbz r11, 335(r1)
+; CHECK-PWR7-NEXT:    std r3, 96(r1)
+; CHECK-PWR7-NEXT:    sldi r12, r12, 56
+; CHECK-PWR7-NEXT:    std r3, 104(r1)
+; CHECK-PWR7-NEXT:    ld r22, 368(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sldi r10, r10, 56
+; CHECK-PWR7-NEXT:    std r10, 192(r1)
+; CHECK-PWR7-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR7-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR7-NEXT:    sub r9, r9, r11
+; CHECK-PWR7-NEXT:    std r12, 272(r1)
+; CHECK-PWR7-NEXT:    std r12, 280(r1)
+; CHECK-PWR7-NEXT:    srawi r12, r19, 31
+; CHECK-PWR7-NEXT:    xor r0, r19, r12
+; CHECK-PWR7-NEXT:    ld r19, 344(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sub r3, r0, r12
+; CHECK-PWR7-NEXT:    srawi r11, r9, 31
+; CHECK-PWR7-NEXT:    std r10, 200(r1)
+; CHECK-PWR7-NEXT:    xor r9, r9, r11
 ; CHECK-PWR7-NEXT:    sldi r3, r3, 56
-; CHECK-PWR7-NEXT:    std r11, 128(r1)
-; CHECK-PWR7-NEXT:    ld r17, 392(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r11, 136(r1)
-; CHECK-PWR7-NEXT:    std r9, 112(r1)
+; CHECK-PWR7-NEXT:    sub r9, r9, r11
+; CHECK-PWR7-NEXT:    std r3, 80(r1)
+; CHECK-PWR7-NEXT:    std r3, 88(r1)
+; CHECK-PWR7-NEXT:    sldi r9, r9, 56
+; CHECK-PWR7-NEXT:    std r9, 288(r1)
+; CHECK-PWR7-NEXT:    std r9, 296(r1)
+; CHECK-PWR7-NEXT:    srawi r9, r20, 31
+; CHECK-PWR7-NEXT:    xor r11, r20, r9
+; CHECK-PWR7-NEXT:    ld r20, 352(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sub r4, r11, r9
+; CHECK-PWR7-NEXT:    sldi r3, r4, 56
 ; CHECK-PWR7-NEXT:    std r3, 64(r1)
 ; CHECK-PWR7-NEXT:    std r3, 72(r1)
-; CHECK-PWR7-NEXT:    addi r3, r1, 304
-; CHECK-PWR7-NEXT:    std r9, 120(r1)
-; CHECK-PWR7-NEXT:    ld r15, 376(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    std r7, 96(r1)
-; CHECK-PWR7-NEXT:    std r7, 104(r1)
-; CHECK-PWR7-NEXT:    std r5, 80(r1)
-; CHECK-PWR7-NEXT:    std r5, 88(r1)
-; CHECK-PWR7-NEXT:    lxvw4x v2, 0, r3
+; CHECK-PWR7-NEXT:    sub r3, r8, r7
+; CHECK-PWR7-NEXT:    sldi r3, r3, 56
+; CHECK-PWR7-NEXT:    std r3, 48(r1)
+; CHECK-PWR7-NEXT:    std r3, 56(r1)
 ; CHECK-PWR7-NEXT:    addi r3, r1, 288
-; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
+; CHECK-PWR7-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 272
-; CHECK-PWR7-NEXT:    ld r14, 368(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    vmrghb v2, v3, v2
 ; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 256
-; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
+; CHECK-PWR7-NEXT:    vmrghb v2, v3, v2
+; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 240
+; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, 224
 ; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
 ; CHECK-PWR7-NEXT:    vmrghh v2, v3, v2
 ; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT:    addi r3, r1, 224
-; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 208
-; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
 ; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 192
-; CHECK-PWR7-NEXT:    lxvw4x v5, 0, r3
+; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
+; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 176
+; CHECK-PWR7-NEXT:    lxvw4x v5, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, 160
 ; CHECK-PWR7-NEXT:    vmrghb v4, v5, v4
 ; CHECK-PWR7-NEXT:    vmrghh v3, v4, v3
 ; CHECK-PWR7-NEXT:    xxmrghw vs0, v3, v2
 ; CHECK-PWR7-NEXT:    lxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT:    addi r3, r1, 160
-; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 144
-; CHECK-PWR7-NEXT:    vmrghb v2, v3, v2
 ; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 128
+; CHECK-PWR7-NEXT:    vmrghb v2, v3, v2
+; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, 112
 ; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, 96
 ; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
-; CHECK-PWR7-NEXT:    addi r3, r1, 112
 ; CHECK-PWR7-NEXT:    vmrghh v2, v3, v2
 ; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
-; CHECK-PWR7-NEXT:    addi r3, r1, 96
-; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 80
-; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
 ; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 64
+; CHECK-PWR7-NEXT:    vmrghb v3, v4, v3
+; CHECK-PWR7-NEXT:    lxvw4x v4, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, 48
 ; CHECK-PWR7-NEXT:    lxvw4x v5, 0, r3
 ; CHECK-PWR7-NEXT:    vmrghb v4, v5, v4
 ; CHECK-PWR7-NEXT:    vmrghh v3, v4, v3
 ; CHECK-PWR7-NEXT:    xxmrghw vs1, v3, v2
 ; CHECK-PWR7-NEXT:    xxmrghd v2, vs1, vs0
-; CHECK-PWR7-NEXT:    addi r1, r1, 512
+; CHECK-PWR7-NEXT:    addi r1, r1, 448
 ; CHECK-PWR7-NEXT:    blr
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
diff --git a/llvm/test/CodeGen/PowerPC/recipest.ll b/llvm/test/CodeGen/PowerPC/recipest.ll
index 2598a410b8761..2d417129d674d 100644
--- a/llvm/test/CodeGen/PowerPC/recipest.ll
+++ b/llvm/test/CodeGen/PowerPC/recipest.ll
@@ -1024,12 +1024,12 @@ define <4 x float> @hoo3_fmf(<4 x float> %a) #1 {
 ; CHECK-P7-NEXT:    vslw 3, 3, 3
 ; CHECK-P7-NEXT:    lvx 0, 0, 3
 ; CHECK-P7-NEXT:    addis 3, 2, .LCPI25_1@toc@ha
-; CHECK-P7-NEXT:    vmaddfp 5, 2, 4, 3
 ; CHECK-P7-NEXT:    addi 3, 3, .LCPI25_1@toc@l
-; CHECK-P7-NEXT:    vmaddfp 4, 5, 4, 0
-; CHECK-P7-NEXT:    lvx 0, 0, 3
-; CHECK-P7-NEXT:    vmaddfp 5, 5, 0, 3
-; CHECK-P7-NEXT:    vmaddfp 3, 5, 4, 3
+; CHECK-P7-NEXT:    vmaddfp 5, 2, 4, 3
+; CHECK-P7-NEXT:    lvx 1, 0, 3
+; CHECK-P7-NEXT:    vmaddfp 0, 5, 0, 3
+; CHECK-P7-NEXT:    vmaddfp 4, 5, 4, 1
+; CHECK-P7-NEXT:    vmaddfp 3, 0, 4, 3
 ; CHECK-P7-NEXT:    vxor 4, 4, 4
 ; CHECK-P7-NEXT:    vcmpeqfp 2, 2, 4
 ; CHECK-P7-NEXT:    vnot 2, 2
diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
index 6207a17734d62..814f7c5fce336 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
@@ -480,12 +480,12 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; RV32IZFBFMIN-NEXT:    addi a2, a3, -1
 ; RV32IZFBFMIN-NEXT:  .LBB10_4: # %start
 ; RV32IZFBFMIN-NEXT:    feq.s a3, fs0, fs0
-; RV32IZFBFMIN-NEXT:    neg a4, a1
-; RV32IZFBFMIN-NEXT:    neg a1, s0
+; RV32IZFBFMIN-NEXT:    neg a4, s0
+; RV32IZFBFMIN-NEXT:    neg a5, a1
 ; RV32IZFBFMIN-NEXT:    neg a3, a3
-; RV32IZFBFMIN-NEXT:    and a0, a1, a0
+; RV32IZFBFMIN-NEXT:    and a0, a4, a0
 ; RV32IZFBFMIN-NEXT:    and a1, a3, a2
-; RV32IZFBFMIN-NEXT:    or a0, a4, a0
+; RV32IZFBFMIN-NEXT:    or a0, a5, a0
 ; RV32IZFBFMIN-NEXT:    and a0, a3, a0
 ; RV32IZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFBFMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -519,12 +519,12 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; R32IDZFBFMIN-NEXT:    addi a2, a3, -1
 ; R32IDZFBFMIN-NEXT:  .LBB10_4: # %start
 ; R32IDZFBFMIN-NEXT:    feq.s a3, fs0, fs0
-; R32IDZFBFMIN-NEXT:    neg a4, a1
-; R32IDZFBFMIN-NEXT:    neg a1, s0
+; R32IDZFBFMIN-NEXT:    neg a4, s0
+; R32IDZFBFMIN-NEXT:    neg a5, a1
 ; R32IDZFBFMIN-NEXT:    neg a3, a3
-; R32IDZFBFMIN-NEXT:    and a0, a1, a0
+; R32IDZFBFMIN-NEXT:    and a0, a4, a0
 ; R32IDZFBFMIN-NEXT:    and a1, a3, a2
-; R32IDZFBFMIN-NEXT:    or a0, a4, a0
+; R32IDZFBFMIN-NEXT:    or a0, a5, a0
 ; R32IDZFBFMIN-NEXT:    and a0, a3, a0
 ; R32IDZFBFMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; R32IDZFBFMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -560,12 +560,12 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
 ; RV32ID-NEXT:    addi a2, a3, -1
 ; RV32ID-NEXT:  .LBB10_4: # %start
 ; RV32ID-NEXT:    feq.s a3, fs0, fs0
-; RV32ID-NEXT:    neg a4, a1
-; RV32ID-NEXT:    neg a1, s0
+; RV32ID-NEXT:    neg a4, s0
+; RV32ID-NEXT:    neg a5, a1
 ; RV32ID-NEXT:    neg a3, a3
-; RV32ID-NEXT:    and a0, a1, a0
+; RV32ID-NEXT:    and a0, a4, a0
 ; RV32ID-NEXT:    and a1, a3, a2
-; RV32ID-NEXT:    or a0, a4, a0
+; RV32ID-NEXT:    or a0, a5, a0
 ; RV32ID-NEXT:    and a0, a3, a0
 ; RV32ID-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index facb544fb52b6..0c21b69310bd0 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2167,12 +2167,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    addi a2, a3, -1
 ; RV32IZFH-NEXT:  .LBB10_4: # %start
 ; RV32IZFH-NEXT:    feq.s a3, fs0, fs0
-; RV32IZFH-NEXT:    neg a4, a1
-; RV32IZFH-NEXT:    neg a1, s0
+; RV32IZFH-NEXT:    neg a4, s0
+; RV32IZFH-NEXT:    neg a5, a1
 ; RV32IZFH-NEXT:    neg a3, a3
-; RV32IZFH-NEXT:    and a0, a1, a0
+; RV32IZFH-NEXT:    and a0, a4, a0
 ; RV32IZFH-NEXT:    and a1, a3, a2
-; RV32IZFH-NEXT:    or a0, a4, a0
+; RV32IZFH-NEXT:    or a0, a5, a0
 ; RV32IZFH-NEXT:    and a0, a3, a0
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -2215,12 +2215,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    addi a2, a3, -1
 ; RV32IDZFH-NEXT:  .LBB10_4: # %start
 ; RV32IDZFH-NEXT:    feq.s a3, fs0, fs0
-; RV32IDZFH-NEXT:    neg a4, a1
-; RV32IDZFH-NEXT:    neg a1, s0
+; RV32IDZFH-NEXT:    neg a4, s0
+; RV32IDZFH-NEXT:    neg a5, a1
 ; RV32IDZFH-NEXT:    neg a3, a3
-; RV32IDZFH-NEXT:    and a0, a1, a0
+; RV32IDZFH-NEXT:    and a0, a4, a0
 ; RV32IDZFH-NEXT:    and a1, a3, a2
-; RV32IDZFH-NEXT:    or a0, a4, a0
+; RV32IDZFH-NEXT:    or a0, a5, a0
 ; RV32IDZFH-NEXT:    and a0, a3, a0
 ; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFH-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -2460,12 +2460,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32ID-ILP32-NEXT:    addi a2, a3, -1
 ; RV32ID-ILP32-NEXT:  .LBB10_4: # %start
 ; RV32ID-ILP32-NEXT:    feq.s a3, fa5, fa5
-; RV32ID-ILP32-NEXT:    neg a4, a1
-; RV32ID-ILP32-NEXT:    neg a1, s0
+; RV32ID-ILP32-NEXT:    neg a4, s0
+; RV32ID-ILP32-NEXT:    neg a5, a1
 ; RV32ID-ILP32-NEXT:    neg a3, a3
-; RV32ID-ILP32-NEXT:    and a0, a1, a0
+; RV32ID-ILP32-NEXT:    and a0, a4, a0
 ; RV32ID-ILP32-NEXT:    and a1, a3, a2
-; RV32ID-ILP32-NEXT:    or a0, a4, a0
+; RV32ID-ILP32-NEXT:    or a0, a5, a0
 ; RV32ID-ILP32-NEXT:    and a0, a3, a0
 ; RV32ID-ILP32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ID-ILP32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -2566,12 +2566,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IFZFHMIN-NEXT:    addi a2, a3, -1
 ; RV32IFZFHMIN-NEXT:  .LBB10_4: # %start
 ; RV32IFZFHMIN-NEXT:    feq.s a3, fs0, fs0
-; RV32IFZFHMIN-NEXT:    neg a4, a1
-; RV32IFZFHMIN-NEXT:    neg a1, s0
+; RV32IFZFHMIN-NEXT:    neg a4, s0
+; RV32IFZFHMIN-NEXT:    neg a5, a1
 ; RV32IFZFHMIN-NEXT:    neg a3, a3
-; RV32IFZFHMIN-NEXT:    and a0, a1, a0
+; RV32IFZFHMIN-NEXT:    and a0, a4, a0
 ; RV32IFZFHMIN-NEXT:    and a1, a3, a2
-; RV32IFZFHMIN-NEXT:    or a0, a4, a0
+; RV32IFZFHMIN-NEXT:    or a0, a5, a0
 ; RV32IFZFHMIN-NEXT:    and a0, a3, a0
 ; RV32IFZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -2615,12 +2615,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFHMIN-NEXT:    addi a2, a3, -1
 ; RV32IDZFHMIN-NEXT:  .LBB10_4: # %start
 ; RV32IDZFHMIN-NEXT:    feq.s a3, fs0, fs0
-; RV32IDZFHMIN-NEXT:    neg a4, a1
-; RV32IDZFHMIN-NEXT:    neg a1, s0
+; RV32IDZFHMIN-NEXT:    neg a4, s0
+; RV32IDZFHMIN-NEXT:    neg a5, a1
 ; RV32IDZFHMIN-NEXT:    neg a3, a3
-; RV32IDZFHMIN-NEXT:    and a0, a1, a0
+; RV32IDZFHMIN-NEXT:    and a0, a4, a0
 ; RV32IDZFHMIN-NEXT:    and a1, a3, a2
-; RV32IDZFHMIN-NEXT:    or a0, a4, a0
+; RV32IDZFHMIN-NEXT:    or a0, a5, a0
 ; RV32IDZFHMIN-NEXT:    and a0, a3, a0
 ; RV32IDZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IDZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index a06c7505d543d..87c8343a417cd 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -7,18 +7,18 @@
 define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 {
 ; RV32-LABEL: ctz_nxv4i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    li a1, -1
+; RV32-NEXT:    vmv.v.i v11, -1
+; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vmsne.vi v0, v8, 0
 ; RV32-NEXT:    srli a0, a0, 1
 ; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vmadd.vx v10, a1, v8
-; RV32-NEXT:    vmv.v.i v8, 0
-; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
+; RV32-NEXT:    vmacc.vv v8, v10, v11
+; RV32-NEXT:    vmv.v.i v9, 0
+; RV32-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v8
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    sub a0, a0, a1
@@ -28,18 +28,18 @@ define i32 @ctz_nxv4i32(<vscale x 4 x i32> %a) #0 {
 ;
 ; RV64-LABEL: ctz_nxv4i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    li a1, -1
+; RV64-NEXT:    vmv.v.i v11, -1
+; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vmadd.vx v10, a1, v8
-; RV64-NEXT:    vmv.v.i v8, 0
-; RV64-NEXT:    vmerge.vvm v8, v8, v10, v0
+; RV64-NEXT:    vmacc.vv v8, v10, v11
+; RV64-NEXT:    vmv.v.i v9, 0
+; RV64-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; RV64-NEXT:    vredmaxu.vs v8, v8, v8
 ; RV64-NEXT:    vmv.x.s a1, v8
 ; RV64-NEXT:    sub a0, a0, a1
@@ -109,17 +109,17 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
 ;
 ; RV64-LABEL: ctz_nxv8i1_no_range:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vid.v v16
-; RV64-NEXT:    li a1, -1
+; RV64-NEXT:    vmv.v.i v24, -1
+; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; RV64-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vmadd.vx v16, a1, v8
-; RV64-NEXT:    vmv.v.i v8, 0
-; RV64-NEXT:    vmerge.vvm v8, v8, v16, v0
+; RV64-NEXT:    vmacc.vv v8, v16, v24
+; RV64-NEXT:    vmv.v.i v16, 0
+; RV64-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; RV64-NEXT:    vredmaxu.vs v8, v8, v8
 ; RV64-NEXT:    vmv.x.s a1, v8
 ; RV64-NEXT:    sub a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
index 57061e1bde83a..53e439836faf4 100644
--- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -203,26 +203,26 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
 ; RV64IZFH-NEXT:    fle.s s0, fa5, fs0
 ; RV64IZFH-NEXT:    fmv.s fa0, fs0
 ; RV64IZFH-NEXT:    call __fixsfti
-; RV64IZFH-NEXT:    li a2, -1
+; RV64IZFH-NEXT:    li a3, -1
 ; RV64IZFH-NEXT:    bnez s0, .LBB4_2
 ; RV64IZFH-NEXT:  # %bb.1:
-; RV64IZFH-NEXT:    slli a1, a2, 63
+; RV64IZFH-NEXT:    slli a1, a3, 63
 ; RV64IZFH-NEXT:  .LBB4_2:
-; RV64IZFH-NEXT:    lui a3, %hi(.LCPI4_0)
-; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI4_0)(a3)
-; RV64IZFH-NEXT:    flt.s a3, fa5, fs0
-; RV64IZFH-NEXT:    beqz a3, .LBB4_4
+; RV64IZFH-NEXT:    lui a2, %hi(.LCPI4_0)
+; RV64IZFH-NEXT:    flw fa5, %lo(.LCPI4_0)(a2)
+; RV64IZFH-NEXT:    flt.s a2, fa5, fs0
+; RV64IZFH-NEXT:    beqz a2, .LBB4_4
 ; RV64IZFH-NEXT:  # %bb.3:
-; RV64IZFH-NEXT:    srli a1, a2, 1
+; RV64IZFH-NEXT:    srli a1, a3, 1
 ; RV64IZFH-NEXT:  .LBB4_4:
-; RV64IZFH-NEXT:    feq.s a2, fs0, fs0
-; RV64IZFH-NEXT:    neg a3, a3
+; RV64IZFH-NEXT:    feq.s a3, fs0, fs0
 ; RV64IZFH-NEXT:    neg a4, s0
 ; RV64IZFH-NEXT:    neg a2, a2
+; RV64IZFH-NEXT:    neg a3, a3
 ; RV64IZFH-NEXT:    and a0, a4, a0
-; RV64IZFH-NEXT:    and a1, a2, a1
-; RV64IZFH-NEXT:    or a0, a3, a0
-; RV64IZFH-NEXT:    and a0, a2, a0
+; RV64IZFH-NEXT:    and a1, a3, a1
+; RV64IZFH-NEXT:    or a0, a2, a0
+; RV64IZFH-NEXT:    and a0, a3, a0
 ; RV64IZFH-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
index cd7f30d8f5898..32753ca382fc7 100644
--- a/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/wide-scalar-shift-legalization.ll
@@ -716,101 +716,92 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli a4, a4, 8
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a6, a6, 24
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    or a4, a6, a5
-; RV32I-NEXT:    lbu a5, 8(a0)
-; RV32I-NEXT:    lbu a6, 9(a0)
-; RV32I-NEXT:    lbu t3, 10(a0)
-; RV32I-NEXT:    lbu t4, 11(a0)
 ; RV32I-NEXT:    slli t0, t0, 8
+; RV32I-NEXT:    or a4, a4, a3
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    or a3, t0, a7
+; RV32I-NEXT:    lbu a6, 8(a0)
+; RV32I-NEXT:    lbu a7, 9(a0)
+; RV32I-NEXT:    lbu t0, 10(a0)
+; RV32I-NEXT:    lbu t3, 11(a0)
 ; RV32I-NEXT:    slli t1, t1, 16
 ; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a7, t0, a7
-; RV32I-NEXT:    or t0, t2, t1
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    lbu a6, 12(a0)
-; RV32I-NEXT:    lbu t1, 13(a0)
-; RV32I-NEXT:    lbu t2, 14(a0)
-; RV32I-NEXT:    lbu a0, 15(a0)
-; RV32I-NEXT:    slli t3, t3, 16
-; RV32I-NEXT:    slli t4, t4, 24
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t2, t2, 16
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or t3, t4, t3
-; RV32I-NEXT:    or a6, t1, a6
-; RV32I-NEXT:    or a0, a0, t2
-; RV32I-NEXT:    lbu t1, 1(a1)
-; RV32I-NEXT:    lbu t2, 0(a1)
-; RV32I-NEXT:    lbu t4, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    or t1, t1, t2
+; RV32I-NEXT:    slli a7, a7, 8
+; RV32I-NEXT:    slli t0, t0, 16
+; RV32I-NEXT:    slli t3, t3, 24
+; RV32I-NEXT:    or t1, t2, t1
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a7, t3, t0
+; RV32I-NEXT:    lbu t0, 12(a0)
+; RV32I-NEXT:    lbu t2, 13(a0)
+; RV32I-NEXT:    lbu t3, 14(a0)
+; RV32I-NEXT:    lbu t4, 15(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
 ; RV32I-NEXT:    sw zero, 16(sp)
 ; RV32I-NEXT:    sw zero, 20(sp)
 ; RV32I-NEXT:    sw zero, 24(sp)
 ; RV32I-NEXT:    sw zero, 28(sp)
-; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, t4
-; RV32I-NEXT:    mv t2, sp
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    or a4, t0, a7
-; RV32I-NEXT:    or a5, t3, a5
-; RV32I-NEXT:    or a0, a0, a6
-; RV32I-NEXT:    or a1, a1, t1
-; RV32I-NEXT:    sw a3, 0(sp)
-; RV32I-NEXT:    sw a4, 4(sp)
-; RV32I-NEXT:    sw a5, 8(sp)
-; RV32I-NEXT:    sw a0, 12(sp)
-; RV32I-NEXT:    srli a0, a1, 3
-; RV32I-NEXT:    andi a3, a1, 31
-; RV32I-NEXT:    andi a0, a0, 12
-; RV32I-NEXT:    xori a3, a3, 31
-; RV32I-NEXT:    add a0, t2, a0
-; RV32I-NEXT:    lw a4, 4(a0)
-; RV32I-NEXT:    lw a5, 8(a0)
-; RV32I-NEXT:    lw a6, 0(a0)
-; RV32I-NEXT:    lw a0, 12(a0)
-; RV32I-NEXT:    srl a7, a4, a1
-; RV32I-NEXT:    slli t0, a5, 1
-; RV32I-NEXT:    srl a6, a6, a1
-; RV32I-NEXT:    slli a4, a4, 1
-; RV32I-NEXT:    srl a5, a5, a1
-; RV32I-NEXT:    slli t1, a0, 1
-; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    sll a1, t0, a3
-; RV32I-NEXT:    sll a4, a4, a3
-; RV32I-NEXT:    sll a3, t1, a3
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    or a1, t2, t0
+; RV32I-NEXT:    mv t0, sp
+; RV32I-NEXT:    slli t3, t3, 16
+; RV32I-NEXT:    slli t4, t4, 24
+; RV32I-NEXT:    or t2, t4, t3
+; RV32I-NEXT:    srli t3, a0, 3
+; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    andi a5, a0, 31
+; RV32I-NEXT:    andi t3, t3, 12
+; RV32I-NEXT:    xori a5, a5, 31
+; RV32I-NEXT:    or a3, t1, a3
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a1, t2, a1
+; RV32I-NEXT:    add t0, t0, t3
+; RV32I-NEXT:    sw a4, 0(sp)
+; RV32I-NEXT:    sw a3, 4(sp)
+; RV32I-NEXT:    sw a6, 8(sp)
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    lw a1, 4(t0)
+; RV32I-NEXT:    lw a3, 8(t0)
+; RV32I-NEXT:    lw a4, 0(t0)
+; RV32I-NEXT:    lw a6, 12(t0)
+; RV32I-NEXT:    srl a7, a1, a0
+; RV32I-NEXT:    slli t0, a3, 1
+; RV32I-NEXT:    srl a4, a4, a0
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srl a3, a3, a0
+; RV32I-NEXT:    slli t1, a6, 1
+; RV32I-NEXT:    srl a0, a6, a0
+; RV32I-NEXT:    sll a6, t0, a5
+; RV32I-NEXT:    sll a1, a1, a5
+; RV32I-NEXT:    sll a5, t1, a5
 ; RV32I-NEXT:    srli t0, a0, 16
 ; RV32I-NEXT:    srli t1, a0, 24
 ; RV32I-NEXT:    srli t2, a0, 8
-; RV32I-NEXT:    or a1, a7, a1
-; RV32I-NEXT:    or a4, a6, a4
-; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a1, a4, a1
+; RV32I-NEXT:    or a3, a3, a5
 ; RV32I-NEXT:    sb a0, 12(a2)
 ; RV32I-NEXT:    sb t2, 13(a2)
 ; RV32I-NEXT:    sb t0, 14(a2)
 ; RV32I-NEXT:    sb t1, 15(a2)
 ; RV32I-NEXT:    srli a0, a3, 16
-; RV32I-NEXT:    srli a5, a3, 24
-; RV32I-NEXT:    srli a6, a3, 8
-; RV32I-NEXT:    srli a7, a4, 16
-; RV32I-NEXT:    srli t0, a4, 24
-; RV32I-NEXT:    srli t1, a4, 8
-; RV32I-NEXT:    srli t2, a1, 16
-; RV32I-NEXT:    srli t3, a1, 24
+; RV32I-NEXT:    srli a4, a3, 24
+; RV32I-NEXT:    srli a5, a3, 8
+; RV32I-NEXT:    srli a7, a1, 16
+; RV32I-NEXT:    srli t0, a1, 24
+; RV32I-NEXT:    srli t1, a1, 8
+; RV32I-NEXT:    srli t2, a6, 16
+; RV32I-NEXT:    srli t3, a6, 24
 ; RV32I-NEXT:    sb a3, 8(a2)
-; RV32I-NEXT:    sb a6, 9(a2)
+; RV32I-NEXT:    sb a5, 9(a2)
 ; RV32I-NEXT:    sb a0, 10(a2)
-; RV32I-NEXT:    sb a5, 11(a2)
-; RV32I-NEXT:    srli a0, a1, 8
-; RV32I-NEXT:    sb a4, 0(a2)
+; RV32I-NEXT:    sb a4, 11(a2)
+; RV32I-NEXT:    srli a0, a6, 8
+; RV32I-NEXT:    sb a1, 0(a2)
 ; RV32I-NEXT:    sb t1, 1(a2)
 ; RV32I-NEXT:    sb a7, 2(a2)
 ; RV32I-NEXT:    sb t0, 3(a2)
-; RV32I-NEXT:    sb a1, 4(a2)
+; RV32I-NEXT:    sb a6, 4(a2)
 ; RV32I-NEXT:    sb a0, 5(a2)
 ; RV32I-NEXT:    sb t2, 6(a2)
 ; RV32I-NEXT:    sb t3, 7(a2)
@@ -952,102 +943,93 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    slli a4, a4, 8
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a6, a6, 24
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    or a4, a6, a5
-; RV32I-NEXT:    lbu a5, 8(a0)
-; RV32I-NEXT:    lbu a6, 9(a0)
-; RV32I-NEXT:    lbu t3, 10(a0)
-; RV32I-NEXT:    lbu t4, 11(a0)
 ; RV32I-NEXT:    slli t0, t0, 8
+; RV32I-NEXT:    or a4, a4, a3
+; RV32I-NEXT:    or a5, a6, a5
+; RV32I-NEXT:    or a3, t0, a7
+; RV32I-NEXT:    lbu a6, 8(a0)
+; RV32I-NEXT:    lbu a7, 9(a0)
+; RV32I-NEXT:    lbu t0, 10(a0)
+; RV32I-NEXT:    lbu t3, 11(a0)
 ; RV32I-NEXT:    slli t1, t1, 16
 ; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    slli a6, a6, 8
-; RV32I-NEXT:    or a7, t0, a7
-; RV32I-NEXT:    or t0, t2, t1
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    lbu a6, 12(a0)
-; RV32I-NEXT:    lbu t1, 13(a0)
-; RV32I-NEXT:    lbu t2, 14(a0)
-; RV32I-NEXT:    lbu a0, 15(a0)
-; RV32I-NEXT:    slli t3, t3, 16
-; RV32I-NEXT:    slli t4, t4, 24
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    slli t2, t2, 16
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or t3, t4, t3
-; RV32I-NEXT:    or a6, t1, a6
-; RV32I-NEXT:    or a0, a0, t2
-; RV32I-NEXT:    lbu t1, 1(a1)
-; RV32I-NEXT:    lbu t2, 0(a1)
-; RV32I-NEXT:    lbu t4, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    or t1, t1, t2
+; RV32I-NEXT:    slli a7, a7, 8
+; RV32I-NEXT:    slli t0, t0, 16
+; RV32I-NEXT:    slli t3, t3, 24
+; RV32I-NEXT:    or t1, t2, t1
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a7, t3, t0
+; RV32I-NEXT:    lbu t0, 12(a0)
+; RV32I-NEXT:    lbu t2, 13(a0)
+; RV32I-NEXT:    lbu t3, 14(a0)
+; RV32I-NEXT:    lbu t4, 15(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
 ; RV32I-NEXT:    sw zero, 0(sp)
 ; RV32I-NEXT:    sw zero, 4(sp)
 ; RV32I-NEXT:    sw zero, 8(sp)
 ; RV32I-NEXT:    sw zero, 12(sp)
-; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, t4
-; RV32I-NEXT:    addi t2, sp, 16
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    or a1, t2, t0
+; RV32I-NEXT:    addi t0, sp, 16
+; RV32I-NEXT:    slli t3, t3, 16
+; RV32I-NEXT:    slli t4, t4, 24
+; RV32I-NEXT:    or t2, t4, t3
+; RV32I-NEXT:    srli t3, a0, 3
+; RV32I-NEXT:    or a4, a5, a4
+; RV32I-NEXT:    andi a5, a0, 31
+; RV32I-NEXT:    andi t3, t3, 12
+; RV32I-NEXT:    or a3, t1, a3
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a1, t2, a1
+; RV32I-NEXT:    sub a7, t0, t3
+; RV32I-NEXT:    sw a4, 16(sp)
+; RV32I-NEXT:    sw a3, 20(sp)
+; RV32I-NEXT:    sw a6, 24(sp)
+; RV32I-NEXT:    sw a1, 28(sp)
+; RV32I-NEXT:    lw a1, 0(a7)
+; RV32I-NEXT:    lw a3, 4(a7)
+; RV32I-NEXT:    lw a4, 8(a7)
+; RV32I-NEXT:    lw a6, 12(a7)
+; RV32I-NEXT:    xori a5, a5, 31
+; RV32I-NEXT:    sll a7, a3, a0
+; RV32I-NEXT:    srli t0, a1, 1
+; RV32I-NEXT:    sll a6, a6, a0
+; RV32I-NEXT:    srli t1, a4, 1
+; RV32I-NEXT:    sll a4, a4, a0
+; RV32I-NEXT:    srli a3, a3, 1
+; RV32I-NEXT:    sll a0, a1, a0
+; RV32I-NEXT:    srl a1, t0, a5
+; RV32I-NEXT:    srl t0, t1, a5
+; RV32I-NEXT:    srl a3, a3, a5
+; RV32I-NEXT:    srli a5, a0, 16
+; RV32I-NEXT:    srli t1, a0, 24
+; RV32I-NEXT:    srli t2, a0, 8
+; RV32I-NEXT:    or a1, a7, a1
+; RV32I-NEXT:    or a6, a6, t0
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    or a4, t0, a7
-; RV32I-NEXT:    or a5, t3, a5
-; RV32I-NEXT:    or a0, a0, a6
-; RV32I-NEXT:    or a1, a1, t1
-; RV32I-NEXT:    sw a3, 16(sp)
-; RV32I-NEXT:    sw a4, 20(sp)
-; RV32I-NEXT:    sw a5, 24(sp)
-; RV32I-NEXT:    sw a0, 28(sp)
-; RV32I-NEXT:    srli a0, a1, 3
-; RV32I-NEXT:    andi a3, a1, 31
-; RV32I-NEXT:    andi a0, a0, 12
-; RV32I-NEXT:    sub a0, t2, a0
-; RV32I-NEXT:    lw a4, 0(a0)
-; RV32I-NEXT:    lw a5, 4(a0)
-; RV32I-NEXT:    lw a6, 8(a0)
-; RV32I-NEXT:    lw a0, 12(a0)
-; RV32I-NEXT:    xori a3, a3, 31
-; RV32I-NEXT:    sll a7, a5, a1
-; RV32I-NEXT:    srli t0, a4, 1
-; RV32I-NEXT:    sll a0, a0, a1
-; RV32I-NEXT:    srli t1, a6, 1
-; RV32I-NEXT:    sll a6, a6, a1
-; RV32I-NEXT:    srli a5, a5, 1
-; RV32I-NEXT:    sll a1, a4, a1
-; RV32I-NEXT:    srl a4, t0, a3
-; RV32I-NEXT:    srl t0, t1, a3
-; RV32I-NEXT:    srl a3, a5, a3
-; RV32I-NEXT:    srli a5, a1, 16
-; RV32I-NEXT:    srli t1, a1, 24
-; RV32I-NEXT:    srli t2, a1, 8
-; RV32I-NEXT:    or a4, a7, a4
-; RV32I-NEXT:    or a0, a0, t0
-; RV32I-NEXT:    or a3, a6, a3
-; RV32I-NEXT:    sb a1, 0(a2)
+; RV32I-NEXT:    sb a0, 0(a2)
 ; RV32I-NEXT:    sb t2, 1(a2)
 ; RV32I-NEXT:    sb a5, 2(a2)
 ; RV32I-NEXT:    sb t1, 3(a2)
-; RV32I-NEXT:    srli a1, a3, 16
-; RV32I-NEXT:    srli a5, a3, 24
-; RV32I-NEXT:    srli a6, a3, 8
-; RV32I-NEXT:    srli a7, a0, 16
-; RV32I-NEXT:    srli t0, a0, 24
-; RV32I-NEXT:    srli t1, a0, 8
-; RV32I-NEXT:    srli t2, a4, 16
-; RV32I-NEXT:    srli t3, a4, 24
+; RV32I-NEXT:    srli a0, a3, 16
+; RV32I-NEXT:    srli a4, a3, 24
+; RV32I-NEXT:    srli a5, a3, 8
+; RV32I-NEXT:    srli a7, a6, 16
+; RV32I-NEXT:    srli t0, a6, 24
+; RV32I-NEXT:    srli t1, a6, 8
+; RV32I-NEXT:    srli t2, a1, 16
+; RV32I-NEXT:    srli t3, a1, 24
 ; RV32I-NEXT:    sb a3, 8(a2)
-; RV32I-NEXT:    sb a6, 9(a2)
-; RV32I-NEXT:    sb a1, 10(a2)
-; RV32I-NEXT:    sb a5, 11(a2)
-; RV32I-NEXT:    srli a1, a4, 8
-; RV32I-NEXT:    sb a0, 12(a2)
+; RV32I-NEXT:    sb a5, 9(a2)
+; RV32I-NEXT:    sb a0, 10(a2)
+; RV32I-NEXT:    sb a4, 11(a2)
+; RV32I-NEXT:    srli a0, a1, 8
+; RV32I-NEXT:    sb a6, 12(a2)
 ; RV32I-NEXT:    sb t1, 13(a2)
 ; RV32I-NEXT:    sb a7, 14(a2)
 ; RV32I-NEXT:    sb t0, 15(a2)
-; RV32I-NEXT:    sb a4, 4(a2)
-; RV32I-NEXT:    sb a1, 5(a2)
+; RV32I-NEXT:    sb a1, 4(a2)
+; RV32I-NEXT:    sb a0, 5(a2)
 ; RV32I-NEXT:    sb t2, 6(a2)
 ; RV32I-NEXT:    sb t3, 7(a2)
 ; RV32I-NEXT:    addi sp, sp, 32
@@ -1186,82 +1168,73 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    lbu t1, 6(a0)
 ; RV32I-NEXT:    lbu t2, 7(a0)
 ; RV32I-NEXT:    slli a4, a4, 8
-; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    lbu a4, 8(a0)
-; RV32I-NEXT:    lbu t3, 9(a0)
-; RV32I-NEXT:    lbu t4, 10(a0)
-; RV32I-NEXT:    lbu t5, 11(a0)
 ; RV32I-NEXT:    slli a5, a5, 16
 ; RV32I-NEXT:    slli a6, a6, 24
 ; RV32I-NEXT:    slli t0, t0, 8
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    or a4, a6, a5
+; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    lbu a6, 8(a0)
+; RV32I-NEXT:    lbu a7, 9(a0)
+; RV32I-NEXT:    lbu t0, 10(a0)
+; RV32I-NEXT:    lbu t3, 11(a0)
 ; RV32I-NEXT:    slli t1, t1, 16
 ; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    or a6, t0, a7
-; RV32I-NEXT:    or a7, t2, t1
+; RV32I-NEXT:    slli a7, a7, 8
+; RV32I-NEXT:    slli t0, t0, 16
+; RV32I-NEXT:    slli t3, t3, 24
+; RV32I-NEXT:    or t1, t2, t1
+; RV32I-NEXT:    or a6, a7, a6
+; RV32I-NEXT:    or a7, t3, t0
 ; RV32I-NEXT:    lbu t0, 12(a0)
-; RV32I-NEXT:    lbu t1, 13(a0)
-; RV32I-NEXT:    lbu t2, 14(a0)
-; RV32I-NEXT:    lbu a0, 15(a0)
-; RV32I-NEXT:    slli t3, t3, 8
-; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    or a4, t3, a4
-; RV32I-NEXT:    or t3, t5, t4
-; RV32I-NEXT:    or t0, t1, t0
-; RV32I-NEXT:    lbu t1, 1(a1)
-; RV32I-NEXT:    lbu t4, 0(a1)
-; RV32I-NEXT:    lbu t5, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
-; RV32I-NEXT:    slli t1, t1, 8
-; RV32I-NEXT:    or t1, t1, t4
-; RV32I-NEXT:    slli t5, t5, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, t5
-; RV32I-NEXT:    or a3, a5, a3
-; RV32I-NEXT:    mv a5, sp
-; RV32I-NEXT:    slli t2, t2, 16
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or t2, a0, t2
-; RV32I-NEXT:    srai a0, a0, 31
+; RV32I-NEXT:    lbu t2, 13(a0)
+; RV32I-NEXT:    lbu t3, 14(a0)
+; RV32I-NEXT:    lbu t4, 15(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    or a1, t2, t0
+; RV32I-NEXT:    mv t0, sp
+; RV32I-NEXT:    slli t3, t3, 16
+; RV32I-NEXT:    slli t4, t4, 24
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    srli a4, a0, 3
+; RV32I-NEXT:    or a5, t1, a5
+; RV32I-NEXT:    andi t1, a0, 31
+; RV32I-NEXT:    or t2, t4, t3
+; RV32I-NEXT:    srai t3, t4, 31
+; RV32I-NEXT:    andi a4, a4, 12
+; RV32I-NEXT:    xori t1, t1, 31
 ; RV32I-NEXT:    or a6, a7, a6
-; RV32I-NEXT:    or a4, t3, a4
-; RV32I-NEXT:    or a7, t2, t0
-; RV32I-NEXT:    or a1, a1, t1
-; RV32I-NEXT:    sw a0, 16(sp)
-; RV32I-NEXT:    sw a0, 20(sp)
-; RV32I-NEXT:    sw a0, 24(sp)
-; RV32I-NEXT:    sw a0, 28(sp)
+; RV32I-NEXT:    or a1, t2, a1
+; RV32I-NEXT:    sw t3, 16(sp)
+; RV32I-NEXT:    sw t3, 20(sp)
+; RV32I-NEXT:    sw t3, 24(sp)
+; RV32I-NEXT:    sw t3, 28(sp)
+; RV32I-NEXT:    add a4, t0, a4
 ; RV32I-NEXT:    sw a3, 0(sp)
-; RV32I-NEXT:    sw a6, 4(sp)
-; RV32I-NEXT:    sw a4, 8(sp)
-; RV32I-NEXT:    sw a7, 12(sp)
-; RV32I-NEXT:    srli a0, a1, 3
-; RV32I-NEXT:    andi a3, a1, 31
-; RV32I-NEXT:    andi a0, a0, 12
-; RV32I-NEXT:    xori a3, a3, 31
-; RV32I-NEXT:    add a0, a5, a0
-; RV32I-NEXT:    lw a4, 4(a0)
-; RV32I-NEXT:    lw a5, 8(a0)
-; RV32I-NEXT:    lw a6, 0(a0)
-; RV32I-NEXT:    lw a0, 12(a0)
-; RV32I-NEXT:    srl a7, a4, a1
-; RV32I-NEXT:    slli t0, a5, 1
-; RV32I-NEXT:    srl a6, a6, a1
-; RV32I-NEXT:    slli a4, a4, 1
-; RV32I-NEXT:    srl a5, a5, a1
-; RV32I-NEXT:    slli t1, a0, 1
-; RV32I-NEXT:    sra a0, a0, a1
-; RV32I-NEXT:    sll a1, t0, a3
-; RV32I-NEXT:    sll a4, a4, a3
-; RV32I-NEXT:    sll a3, t1, a3
+; RV32I-NEXT:    sw a5, 4(sp)
+; RV32I-NEXT:    sw a6, 8(sp)
+; RV32I-NEXT:    sw a1, 12(sp)
+; RV32I-NEXT:    lw a1, 4(a4)
+; RV32I-NEXT:    lw a3, 8(a4)
+; RV32I-NEXT:    lw a5, 0(a4)
+; RV32I-NEXT:    lw a4, 12(a4)
+; RV32I-NEXT:    srl a6, a1, a0
+; RV32I-NEXT:    slli a7, a3, 1
+; RV32I-NEXT:    srl a5, a5, a0
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srl a3, a3, a0
+; RV32I-NEXT:    slli t0, a4, 1
+; RV32I-NEXT:    sra a0, a4, a0
+; RV32I-NEXT:    sll a4, a7, t1
+; RV32I-NEXT:    sll a1, a1, t1
+; RV32I-NEXT:    sll a7, t0, t1
 ; RV32I-NEXT:    srli t0, a0, 16
 ; RV32I-NEXT:    srli t1, a0, 24
 ; RV32I-NEXT:    srli t2, a0, 8
-; RV32I-NEXT:    or a1, a7, a1
 ; RV32I-NEXT:    or a4, a6, a4
-; RV32I-NEXT:    or a3, a5, a3
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a3, a3, a7
 ; RV32I-NEXT:    sb a0, 12(a2)
 ; RV32I-NEXT:    sb t2, 13(a2)
 ; RV32I-NEXT:    sb t0, 14(a2)
@@ -1269,21 +1242,21 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    srli a0, a3, 16
 ; RV32I-NEXT:    srli a5, a3, 24
 ; RV32I-NEXT:    srli a6, a3, 8
-; RV32I-NEXT:    srli a7, a4, 16
-; RV32I-NEXT:    srli t0, a4, 24
-; RV32I-NEXT:    srli t1, a4, 8
-; RV32I-NEXT:    srli t2, a1, 16
-; RV32I-NEXT:    srli t3, a1, 24
+; RV32I-NEXT:    srli a7, a1, 16
+; RV32I-NEXT:    srli t0, a1, 24
+; RV32I-NEXT:    srli t1, a1, 8
+; RV32I-NEXT:    srli t2, a4, 16
+; RV32I-NEXT:    srli t3, a4, 24
 ; RV32I-NEXT:    sb a3, 8(a2)
 ; RV32I-NEXT:    sb a6, 9(a2)
 ; RV32I-NEXT:    sb a0, 10(a2)
 ; RV32I-NEXT:    sb a5, 11(a2)
-; RV32I-NEXT:    srli a0, a1, 8
-; RV32I-NEXT:    sb a4, 0(a2)
+; RV32I-NEXT:    srli a0, a4, 8
+; RV32I-NEXT:    sb a1, 0(a2)
 ; RV32I-NEXT:    sb t1, 1(a2)
 ; RV32I-NEXT:    sb a7, 2(a2)
 ; RV32I-NEXT:    sb t0, 3(a2)
-; RV32I-NEXT:    sb a1, 4(a2)
+; RV32I-NEXT:    sb a4, 4(a2)
 ; RV32I-NEXT:    sb a0, 5(a2)
 ; RV32I-NEXT:    sb t2, 6(a2)
 ; RV32I-NEXT:    sb t3, 7(a2)
@@ -1299,19 +1272,17 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-LABEL: lshr_32bytes:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -160
-; RV64I-NEXT:    sd s0, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s1, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s2, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s3, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s4, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s5, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s6, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s7, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s8, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s9, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s10, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s11, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -144
+; RV64I-NEXT:    sd s0, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 64(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    lbu a4, 1(a0)
 ; RV64I-NEXT:    lbu a5, 2(a0)
@@ -1328,143 +1299,122 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    lbu s1, 13(a0)
 ; RV64I-NEXT:    lbu s2, 14(a0)
 ; RV64I-NEXT:    lbu s3, 15(a0)
+; RV64I-NEXT:    slli a4, a4, 8
+; RV64I-NEXT:    slli a5, a5, 16
+; RV64I-NEXT:    slli a6, a6, 24
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a4, a6, a5
 ; RV64I-NEXT:    lbu s4, 16(a0)
 ; RV64I-NEXT:    lbu s5, 17(a0)
 ; RV64I-NEXT:    lbu s6, 18(a0)
 ; RV64I-NEXT:    lbu s7, 19(a0)
-; RV64I-NEXT:    slli a4, a4, 8
-; RV64I-NEXT:    slli s8, a5, 16
-; RV64I-NEXT:    slli a6, a6, 24
 ; RV64I-NEXT:    slli t0, t0, 8
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
-; RV64I-NEXT:    or a5, a4, a3
-; RV64I-NEXT:    or a6, a6, s8
-; RV64I-NEXT:    or a3, t0, a7
-; RV64I-NEXT:    or a4, t2, t1
-; RV64I-NEXT:    lbu s8, 20(a0)
-; RV64I-NEXT:    lbu s9, 21(a0)
-; RV64I-NEXT:    lbu s10, 22(a0)
-; RV64I-NEXT:    lbu s11, 23(a0)
 ; RV64I-NEXT:    slli t4, t4, 8
 ; RV64I-NEXT:    slli t5, t5, 16
 ; RV64I-NEXT:    slli t6, t6, 24
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a6, t2, t1
+; RV64I-NEXT:    or a7, t4, t3
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    lbu t5, 20(a0)
+; RV64I-NEXT:    lbu t6, 21(a0)
+; RV64I-NEXT:    lbu s8, 22(a0)
+; RV64I-NEXT:    lbu s9, 23(a0)
 ; RV64I-NEXT:    slli s1, s1, 8
 ; RV64I-NEXT:    slli s2, s2, 16
 ; RV64I-NEXT:    slli s3, s3, 24
-; RV64I-NEXT:    or a7, t4, t3
-; RV64I-NEXT:    or t0, t6, t5
-; RV64I-NEXT:    or t1, s1, s0
-; RV64I-NEXT:    or t2, s3, s2
-; RV64I-NEXT:    lbu t6, 24(a0)
-; RV64I-NEXT:    lbu s0, 25(a0)
-; RV64I-NEXT:    lbu s1, 26(a0)
-; RV64I-NEXT:    lbu s2, 27(a0)
 ; RV64I-NEXT:    slli s5, s5, 8
 ; RV64I-NEXT:    slli s6, s6, 16
 ; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    slli s9, s9, 8
+; RV64I-NEXT:    or t1, s1, s0
+; RV64I-NEXT:    or t2, s3, s2
 ; RV64I-NEXT:    or t3, s5, s4
 ; RV64I-NEXT:    or t4, s7, s6
-; RV64I-NEXT:    or t5, s9, s8
-; RV64I-NEXT:    lbu s3, 28(a0)
+; RV64I-NEXT:    lbu s0, 24(a0)
+; RV64I-NEXT:    lbu s1, 25(a0)
+; RV64I-NEXT:    lbu s2, 26(a0)
+; RV64I-NEXT:    lbu s3, 27(a0)
+; RV64I-NEXT:    slli t6, t6, 8
+; RV64I-NEXT:    slli s8, s8, 16
+; RV64I-NEXT:    slli s9, s9, 24
+; RV64I-NEXT:    slli s1, s1, 8
+; RV64I-NEXT:    or t5, t6, t5
+; RV64I-NEXT:    or t6, s9, s8
+; RV64I-NEXT:    or s0, s1, s0
+; RV64I-NEXT:    lbu s1, 28(a0)
 ; RV64I-NEXT:    lbu s4, 29(a0)
 ; RV64I-NEXT:    lbu s5, 30(a0)
 ; RV64I-NEXT:    lbu s6, 31(a0)
-; RV64I-NEXT:    slli s10, s10, 16
-; RV64I-NEXT:    slli s11, s11, 24
-; RV64I-NEXT:    slli s0, s0, 8
-; RV64I-NEXT:    slli s1, s1, 16
-; RV64I-NEXT:    slli s2, s2, 24
-; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or a0, s11, s10
-; RV64I-NEXT:    or t6, s0, t6
-; RV64I-NEXT:    or s0, s2, s1
-; RV64I-NEXT:    or s1, s4, s3
-; RV64I-NEXT:    lbu s2, 0(a1)
-; RV64I-NEXT:    lbu s3, 1(a1)
-; RV64I-NEXT:    lbu s4, 2(a1)
-; RV64I-NEXT:    lbu s7, 3(a1)
-; RV64I-NEXT:    slli s5, s5, 16
-; RV64I-NEXT:    slli s6, s6, 24
-; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    or s5, s6, s5
-; RV64I-NEXT:    or s2, s3, s2
-; RV64I-NEXT:    or s3, s7, s4
-; RV64I-NEXT:    lbu s4, 5(a1)
-; RV64I-NEXT:    lbu s6, 4(a1)
-; RV64I-NEXT:    lbu s7, 6(a1)
-; RV64I-NEXT:    lbu a1, 7(a1)
-; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or s4, s4, s6
-; RV64I-NEXT:    slli s7, s7, 16
-; RV64I-NEXT:    slli a1, a1, 24
-; RV64I-NEXT:    or a1, a1, s7
+; RV64I-NEXT:    lbu a0, 0(a1)
 ; RV64I-NEXT:    sd zero, 32(sp)
 ; RV64I-NEXT:    sd zero, 40(sp)
 ; RV64I-NEXT:    sd zero, 48(sp)
 ; RV64I-NEXT:    sd zero, 56(sp)
-; RV64I-NEXT:    or a5, a6, a5
-; RV64I-NEXT:    mv a6, sp
+; RV64I-NEXT:    slli s2, s2, 16
+; RV64I-NEXT:    slli s3, s3, 24
+; RV64I-NEXT:    or a1, s3, s2
+; RV64I-NEXT:    mv s2, sp
+; RV64I-NEXT:    slli s4, s4, 8
+; RV64I-NEXT:    slli s5, s5, 16
+; RV64I-NEXT:    slli s6, s6, 24
+; RV64I-NEXT:    or s1, s4, s1
+; RV64I-NEXT:    srli s3, a0, 3
+; RV64I-NEXT:    or s4, s6, s5
+; RV64I-NEXT:    andi s5, a0, 63
+; RV64I-NEXT:    andi s3, s3, 24
+; RV64I-NEXT:    xori s5, s5, 63
 ; RV64I-NEXT:    or a3, a4, a3
-; RV64I-NEXT:    or a4, t0, a7
-; RV64I-NEXT:    or a7, t2, t1
-; RV64I-NEXT:    or t0, t4, t3
-; RV64I-NEXT:    or a0, a0, t5
-; RV64I-NEXT:    or t1, s0, t6
-; RV64I-NEXT:    or t2, s5, s1
-; RV64I-NEXT:    or t3, s3, s2
-; RV64I-NEXT:    or a1, a1, s4
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    slli a7, a7, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli t2, t2, 32
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    or a3, a3, a5
-; RV64I-NEXT:    or a4, a7, a4
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a5, t2, t1
-; RV64I-NEXT:    or a1, a1, t3
+; RV64I-NEXT:    or a4, a6, a5
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a6, t2, t1
+; RV64I-NEXT:    or a7, t4, t3
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    or a1, a1, s0
+; RV64I-NEXT:    or t1, s4, s1
+; RV64I-NEXT:    add s2, s2, s3
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    slli a6, a6, 32
+; RV64I-NEXT:    slli t0, t0, 32
+; RV64I-NEXT:    slli t1, t1, 32
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a4, a6, a5
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a1, t1, a1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    sd a4, 8(sp)
-; RV64I-NEXT:    sd a0, 16(sp)
-; RV64I-NEXT:    sd a5, 24(sp)
-; RV64I-NEXT:    srli a0, a1, 3
-; RV64I-NEXT:    andi a3, a1, 63
-; RV64I-NEXT:    andi a0, a0, 24
-; RV64I-NEXT:    xori a3, a3, 63
-; RV64I-NEXT:    add a0, a6, a0
-; RV64I-NEXT:    ld a4, 8(a0)
-; RV64I-NEXT:    ld a5, 16(a0)
-; RV64I-NEXT:    ld a6, 0(a0)
-; RV64I-NEXT:    ld a0, 24(a0)
-; RV64I-NEXT:    srl a7, a4, a1
+; RV64I-NEXT:    sd a5, 16(sp)
+; RV64I-NEXT:    sd a1, 24(sp)
+; RV64I-NEXT:    ld a1, 8(s2)
+; RV64I-NEXT:    ld a3, 16(s2)
+; RV64I-NEXT:    ld a4, 0(s2)
+; RV64I-NEXT:    ld a5, 24(s2)
+; RV64I-NEXT:    srl a6, a1, a0
+; RV64I-NEXT:    slli a7, a3, 1
+; RV64I-NEXT:    srl a4, a4, a0
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srl a3, a3, a0
 ; RV64I-NEXT:    slli t0, a5, 1
-; RV64I-NEXT:    srl a6, a6, a1
-; RV64I-NEXT:    slli a4, a4, 1
-; RV64I-NEXT:    srl a5, a5, a1
-; RV64I-NEXT:    slli t1, a0, 1
-; RV64I-NEXT:    srl t2, a0, a1
-; RV64I-NEXT:    sll a0, t0, a3
-; RV64I-NEXT:    sll a1, a4, a3
-; RV64I-NEXT:    sll a3, t1, a3
-; RV64I-NEXT:    srli a4, t2, 56
-; RV64I-NEXT:    srli t0, t2, 48
-; RV64I-NEXT:    srli t1, t2, 40
-; RV64I-NEXT:    srli t3, t2, 32
-; RV64I-NEXT:    srli t4, t2, 24
-; RV64I-NEXT:    srli t5, t2, 16
-; RV64I-NEXT:    srli t6, t2, 8
-; RV64I-NEXT:    or a0, a7, a0
-; RV64I-NEXT:    or a1, a6, a1
-; RV64I-NEXT:    or a3, a5, a3
+; RV64I-NEXT:    srl a5, a5, a0
+; RV64I-NEXT:    sll a0, a7, s5
+; RV64I-NEXT:    sll a1, a1, s5
+; RV64I-NEXT:    sll a7, t0, s5
+; RV64I-NEXT:    srli t0, a5, 56
+; RV64I-NEXT:    srli t1, a5, 48
+; RV64I-NEXT:    srli t2, a5, 40
+; RV64I-NEXT:    srli t3, a5, 32
+; RV64I-NEXT:    srli t4, a5, 24
+; RV64I-NEXT:    srli t5, a5, 16
+; RV64I-NEXT:    srli t6, a5, 8
+; RV64I-NEXT:    or a0, a6, a0
+; RV64I-NEXT:    or a1, a4, a1
+; RV64I-NEXT:    or a3, a3, a7
 ; RV64I-NEXT:    sb t3, 28(a2)
-; RV64I-NEXT:    sb t1, 29(a2)
-; RV64I-NEXT:    sb t0, 30(a2)
-; RV64I-NEXT:    sb a4, 31(a2)
-; RV64I-NEXT:    sb t2, 24(a2)
+; RV64I-NEXT:    sb t2, 29(a2)
+; RV64I-NEXT:    sb t1, 30(a2)
+; RV64I-NEXT:    sb t0, 31(a2)
+; RV64I-NEXT:    sb a5, 24(a2)
 ; RV64I-NEXT:    sb t6, 25(a2)
 ; RV64I-NEXT:    sb t5, 26(a2)
 ; RV64I-NEXT:    sb t4, 27(a2)
@@ -1513,19 +1463,17 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    sb a1, 9(a2)
 ; RV64I-NEXT:    sb a5, 10(a2)
 ; RV64I-NEXT:    sb a3, 11(a2)
-; RV64I-NEXT:    ld s0, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s1, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s2, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s3, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s4, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s5, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s6, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s7, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s8, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s9, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s10, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s11, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 160
+; RV64I-NEXT:    ld s0, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s9, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 144
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: lshr_32bytes:
@@ -1550,67 +1498,55 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    lbu a7, 3(a0)
 ; RV32I-NEXT:    lbu a5, 4(a0)
 ; RV32I-NEXT:    lbu t0, 5(a0)
-; RV32I-NEXT:    lbu t1, 6(a0)
-; RV32I-NEXT:    lbu t2, 7(a0)
-; RV32I-NEXT:    lbu t3, 8(a0)
-; RV32I-NEXT:    lbu t4, 9(a0)
-; RV32I-NEXT:    lbu t5, 10(a0)
-; RV32I-NEXT:    lbu t6, 11(a0)
-; RV32I-NEXT:    lbu s0, 12(a0)
-; RV32I-NEXT:    lbu s2, 13(a0)
-; RV32I-NEXT:    lbu s4, 14(a0)
-; RV32I-NEXT:    lbu s5, 15(a0)
-; RV32I-NEXT:    lbu s6, 16(a0)
-; RV32I-NEXT:    lbu s7, 17(a0)
-; RV32I-NEXT:    lbu s8, 18(a0)
-; RV32I-NEXT:    lbu s9, 19(a0)
+; RV32I-NEXT:    lbu t3, 6(a0)
+; RV32I-NEXT:    lbu t6, 7(a0)
+; RV32I-NEXT:    lbu s2, 8(a0)
+; RV32I-NEXT:    lbu s3, 9(a0)
+; RV32I-NEXT:    lbu s4, 10(a0)
+; RV32I-NEXT:    lbu s5, 11(a0)
+; RV32I-NEXT:    lbu s7, 12(a0)
+; RV32I-NEXT:    lbu s8, 13(a0)
+; RV32I-NEXT:    lbu s9, 14(a0)
+; RV32I-NEXT:    lbu s10, 15(a0)
+; RV32I-NEXT:    lbu s11, 16(a0)
+; RV32I-NEXT:    lbu ra, 17(a0)
+; RV32I-NEXT:    lbu t4, 18(a0)
+; RV32I-NEXT:    lbu s0, 19(a0)
 ; RV32I-NEXT:    slli a4, a4, 8
 ; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a7, a7, 24
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    sw a3, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    or a4, a7, a6
-; RV32I-NEXT:    lbu s10, 20(a0)
-; RV32I-NEXT:    lbu s11, 21(a0)
-; RV32I-NEXT:    lbu ra, 22(a0)
-; RV32I-NEXT:    lbu a3, 23(a0)
+; RV32I-NEXT:    lbu t1, 20(a0)
+; RV32I-NEXT:    lbu t2, 21(a0)
+; RV32I-NEXT:    lbu t5, 22(a0)
+; RV32I-NEXT:    lbu s1, 23(a0)
 ; RV32I-NEXT:    slli t0, t0, 8
-; RV32I-NEXT:    slli t1, t1, 16
-; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    slli t4, t4, 8
-; RV32I-NEXT:    slli t5, t5, 16
+; RV32I-NEXT:    slli t3, t3, 16
 ; RV32I-NEXT:    slli t6, t6, 24
-; RV32I-NEXT:    or a5, t0, a5
-; RV32I-NEXT:    or a6, t2, t1
-; RV32I-NEXT:    or a7, t4, t3
-; RV32I-NEXT:    or t0, t6, t5
-; RV32I-NEXT:    lbu s1, 24(a0)
-; RV32I-NEXT:    lbu s3, 25(a0)
-; RV32I-NEXT:    lbu t4, 26(a0)
-; RV32I-NEXT:    lbu t5, 27(a0)
-; RV32I-NEXT:    slli s2, s2, 8
+; RV32I-NEXT:    slli s3, s3, 8
 ; RV32I-NEXT:    slli s4, s4, 16
 ; RV32I-NEXT:    slli s5, s5, 24
-; RV32I-NEXT:    slli s7, s7, 8
-; RV32I-NEXT:    or t1, s2, s0
-; RV32I-NEXT:    or t2, s5, s4
-; RV32I-NEXT:    or t3, s7, s6
-; RV32I-NEXT:    lbu t6, 28(a0)
-; RV32I-NEXT:    lbu s4, 29(a0)
-; RV32I-NEXT:    lbu s5, 30(a0)
-; RV32I-NEXT:    lbu s6, 31(a0)
-; RV32I-NEXT:    slli s8, s8, 16
-; RV32I-NEXT:    slli s9, s9, 24
-; RV32I-NEXT:    slli s11, s11, 8
-; RV32I-NEXT:    slli ra, ra, 16
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    or a0, s9, s8
-; RV32I-NEXT:    or s0, s11, s10
-; RV32I-NEXT:    or s2, a3, ra
-; RV32I-NEXT:    lbu a3, 0(a1)
-; RV32I-NEXT:    lbu s7, 1(a1)
-; RV32I-NEXT:    lbu s8, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
+; RV32I-NEXT:    or a5, t0, a5
+; RV32I-NEXT:    or a6, t6, t3
+; RV32I-NEXT:    or a7, s3, s2
+; RV32I-NEXT:    or t0, s5, s4
+; RV32I-NEXT:    lbu t3, 24(a0)
+; RV32I-NEXT:    lbu s5, 25(a0)
+; RV32I-NEXT:    lbu s6, 26(a0)
+; RV32I-NEXT:    lbu t6, 27(a0)
+; RV32I-NEXT:    slli s8, s8, 8
+; RV32I-NEXT:    slli s9, s9, 16
+; RV32I-NEXT:    slli s10, s10, 24
+; RV32I-NEXT:    slli ra, ra, 8
+; RV32I-NEXT:    or s7, s8, s7
+; RV32I-NEXT:    or s2, s10, s9
+; RV32I-NEXT:    or s3, ra, s11
+; RV32I-NEXT:    lbu s4, 28(a0)
+; RV32I-NEXT:    lbu s8, 29(a0)
+; RV32I-NEXT:    lbu s9, 30(a0)
+; RV32I-NEXT:    lbu s10, 31(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
 ; RV32I-NEXT:    sw zero, 56(sp)
 ; RV32I-NEXT:    sw zero, 60(sp)
 ; RV32I-NEXT:    sw zero, 64(sp)
@@ -1619,89 +1555,90 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    sw zero, 44(sp)
 ; RV32I-NEXT:    sw zero, 48(sp)
 ; RV32I-NEXT:    sw zero, 52(sp)
-; RV32I-NEXT:    slli s3, s3, 8
-; RV32I-NEXT:    or s1, s3, s1
-; RV32I-NEXT:    addi s3, sp, 8
 ; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli s4, s4, 8
-; RV32I-NEXT:    slli s5, s5, 16
-; RV32I-NEXT:    slli s6, s6, 24
-; RV32I-NEXT:    slli s7, s7, 8
-; RV32I-NEXT:    slli s8, s8, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or t4, t5, t4
-; RV32I-NEXT:    or t5, s4, t6
-; RV32I-NEXT:    or t6, s6, s5
-; RV32I-NEXT:    or a3, s7, a3
-; RV32I-NEXT:    or a1, a1, s8
-; RV32I-NEXT:    lw s4, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    or a4, a4, s4
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    or a6, t0, a7
-; RV32I-NEXT:    or a7, t2, t1
-; RV32I-NEXT:    or t0, a0, t3
-; RV32I-NEXT:    or t1, s2, s0
-; RV32I-NEXT:    or t2, t4, s1
-; RV32I-NEXT:    or t3, t6, t5
-; RV32I-NEXT:    or a0, a1, a3
-; RV32I-NEXT:    sw t0, 24(sp)
-; RV32I-NEXT:    sw t1, 28(sp)
-; RV32I-NEXT:    sw t2, 32(sp)
-; RV32I-NEXT:    sw t3, 36(sp)
-; RV32I-NEXT:    sw a4, 8(sp)
-; RV32I-NEXT:    sw a5, 12(sp)
-; RV32I-NEXT:    sw a6, 16(sp)
-; RV32I-NEXT:    sw a7, 20(sp)
+; RV32I-NEXT:    slli s0, s0, 24
+; RV32I-NEXT:    or t4, s0, t4
+; RV32I-NEXT:    addi s0, sp, 8
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    slli t5, t5, 16
+; RV32I-NEXT:    slli s1, s1, 24
+; RV32I-NEXT:    slli s5, s5, 8
+; RV32I-NEXT:    slli s6, s6, 16
+; RV32I-NEXT:    slli t6, t6, 24
+; RV32I-NEXT:    slli s8, s8, 8
+; RV32I-NEXT:    slli s9, s9, 16
+; RV32I-NEXT:    slli s10, s10, 24
+; RV32I-NEXT:    or t1, t2, t1
 ; RV32I-NEXT:    srli a1, a0, 3
-; RV32I-NEXT:    andi a3, a0, 31
-; RV32I-NEXT:    andi a4, a1, 28
-; RV32I-NEXT:    xori a1, a3, 31
-; RV32I-NEXT:    add a4, s3, a4
-; RV32I-NEXT:    lw a3, 0(a4)
-; RV32I-NEXT:    lw a5, 4(a4)
-; RV32I-NEXT:    lw a6, 8(a4)
-; RV32I-NEXT:    lw a7, 12(a4)
-; RV32I-NEXT:    lw t0, 16(a4)
-; RV32I-NEXT:    lw t1, 20(a4)
-; RV32I-NEXT:    lw t2, 24(a4)
-; RV32I-NEXT:    lw a4, 28(a4)
-; RV32I-NEXT:    srl t3, a5, a0
-; RV32I-NEXT:    slli t4, a6, 1
+; RV32I-NEXT:    or t2, s1, t5
+; RV32I-NEXT:    andi t5, a0, 31
+; RV32I-NEXT:    or t3, s5, t3
+; RV32I-NEXT:    or t6, t6, s6
+; RV32I-NEXT:    or s1, s8, s4
+; RV32I-NEXT:    or s4, s10, s9
+; RV32I-NEXT:    andi s5, a1, 28
+; RV32I-NEXT:    xori a1, t5, 31
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    or a4, a6, a5
+; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    or a6, s2, s7
+; RV32I-NEXT:    or a7, t4, s3
+; RV32I-NEXT:    or t0, t2, t1
+; RV32I-NEXT:    or t1, t6, t3
+; RV32I-NEXT:    or t2, s4, s1
+; RV32I-NEXT:    add s0, s0, s5
+; RV32I-NEXT:    sw a7, 24(sp)
+; RV32I-NEXT:    sw t0, 28(sp)
+; RV32I-NEXT:    sw t1, 32(sp)
+; RV32I-NEXT:    sw t2, 36(sp)
+; RV32I-NEXT:    sw a3, 8(sp)
+; RV32I-NEXT:    sw a4, 12(sp)
+; RV32I-NEXT:    sw a5, 16(sp)
+; RV32I-NEXT:    sw a6, 20(sp)
+; RV32I-NEXT:    lw a3, 0(s0)
+; RV32I-NEXT:    lw a4, 4(s0)
+; RV32I-NEXT:    lw a5, 8(s0)
+; RV32I-NEXT:    lw a6, 12(s0)
+; RV32I-NEXT:    lw a7, 16(s0)
+; RV32I-NEXT:    lw t0, 20(s0)
+; RV32I-NEXT:    lw t1, 24(s0)
+; RV32I-NEXT:    lw t2, 28(s0)
+; RV32I-NEXT:    srl t3, a4, a0
+; RV32I-NEXT:    slli t4, a5, 1
 ; RV32I-NEXT:    srl a3, a3, a0
-; RV32I-NEXT:    slli a5, a5, 1
-; RV32I-NEXT:    srl t5, a7, a0
-; RV32I-NEXT:    slli t6, t0, 1
-; RV32I-NEXT:    srl a6, a6, a0
-; RV32I-NEXT:    slli a7, a7, 1
-; RV32I-NEXT:    srl s0, t1, a0
-; RV32I-NEXT:    slli s1, t2, 1
-; RV32I-NEXT:    srl t0, t0, a0
-; RV32I-NEXT:    slli t1, t1, 1
+; RV32I-NEXT:    slli a4, a4, 1
+; RV32I-NEXT:    srl t5, a6, a0
+; RV32I-NEXT:    slli t6, a7, 1
+; RV32I-NEXT:    srl a5, a5, a0
+; RV32I-NEXT:    slli a6, a6, 1
+; RV32I-NEXT:    srl s0, t0, a0
+; RV32I-NEXT:    slli s1, t1, 1
+; RV32I-NEXT:    srl a7, a7, a0
+; RV32I-NEXT:    slli t0, t0, 1
+; RV32I-NEXT:    srl t1, t1, a0
+; RV32I-NEXT:    slli s2, t2, 1
 ; RV32I-NEXT:    srl t2, t2, a0
-; RV32I-NEXT:    slli s2, a4, 1
-; RV32I-NEXT:    srl s3, a4, a0
 ; RV32I-NEXT:    sll a0, t4, a1
-; RV32I-NEXT:    sll a4, a5, a1
-; RV32I-NEXT:    sll a5, t6, a1
-; RV32I-NEXT:    sll a7, a7, a1
-; RV32I-NEXT:    sll t4, s1, a1
-; RV32I-NEXT:    sll t1, t1, a1
-; RV32I-NEXT:    sll t6, s2, a1
-; RV32I-NEXT:    srli s1, s3, 24
-; RV32I-NEXT:    srli s2, s3, 16
-; RV32I-NEXT:    srli s4, s3, 8
+; RV32I-NEXT:    sll a4, a4, a1
+; RV32I-NEXT:    sll t4, t6, a1
+; RV32I-NEXT:    sll a6, a6, a1
+; RV32I-NEXT:    sll t6, s1, a1
+; RV32I-NEXT:    sll t0, t0, a1
+; RV32I-NEXT:    sll s1, s2, a1
+; RV32I-NEXT:    srli s2, t2, 24
+; RV32I-NEXT:    srli s3, t2, 16
+; RV32I-NEXT:    srli s4, t2, 8
 ; RV32I-NEXT:    or a0, t3, a0
 ; RV32I-NEXT:    or a1, a3, a4
-; RV32I-NEXT:    or a3, t5, a5
-; RV32I-NEXT:    or a4, a6, a7
-; RV32I-NEXT:    or a5, s0, t4
-; RV32I-NEXT:    or a6, t0, t1
-; RV32I-NEXT:    or a7, t2, t6
-; RV32I-NEXT:    sb s3, 28(a2)
+; RV32I-NEXT:    or a3, t5, t4
+; RV32I-NEXT:    or a4, a5, a6
+; RV32I-NEXT:    or a5, s0, t6
+; RV32I-NEXT:    or a6, a7, t0
+; RV32I-NEXT:    or a7, t1, s1
+; RV32I-NEXT:    sb t2, 28(a2)
 ; RV32I-NEXT:    sb s4, 29(a2)
-; RV32I-NEXT:    sb s2, 30(a2)
-; RV32I-NEXT:    sb s1, 31(a2)
+; RV32I-NEXT:    sb s3, 30(a2)
+; RV32I-NEXT:    sb s2, 31(a2)
 ; RV32I-NEXT:    srli t0, a7, 24
 ; RV32I-NEXT:    srli t1, a7, 16
 ; RV32I-NEXT:    srli t2, a7, 8
@@ -1775,19 +1712,17 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-LABEL: shl_32bytes:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -160
-; RV64I-NEXT:    sd s0, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s1, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s2, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s3, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s4, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s5, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s6, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s7, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s8, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s9, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s10, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s11, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -144
+; RV64I-NEXT:    sd s0, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 64(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    lbu a4, 1(a0)
 ; RV64I-NEXT:    lbu a5, 2(a0)
@@ -1804,146 +1739,125 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    lbu s1, 13(a0)
 ; RV64I-NEXT:    lbu s2, 14(a0)
 ; RV64I-NEXT:    lbu s3, 15(a0)
+; RV64I-NEXT:    slli a4, a4, 8
+; RV64I-NEXT:    slli a5, a5, 16
+; RV64I-NEXT:    slli a6, a6, 24
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a4, a6, a5
 ; RV64I-NEXT:    lbu s4, 16(a0)
 ; RV64I-NEXT:    lbu s5, 17(a0)
 ; RV64I-NEXT:    lbu s6, 18(a0)
 ; RV64I-NEXT:    lbu s7, 19(a0)
-; RV64I-NEXT:    slli a4, a4, 8
-; RV64I-NEXT:    slli s8, a5, 16
-; RV64I-NEXT:    slli a6, a6, 24
 ; RV64I-NEXT:    slli t0, t0, 8
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
-; RV64I-NEXT:    or a5, a4, a3
-; RV64I-NEXT:    or a6, a6, s8
-; RV64I-NEXT:    or a3, t0, a7
-; RV64I-NEXT:    or a4, t2, t1
-; RV64I-NEXT:    lbu s8, 20(a0)
-; RV64I-NEXT:    lbu s9, 21(a0)
-; RV64I-NEXT:    lbu s10, 22(a0)
-; RV64I-NEXT:    lbu s11, 23(a0)
 ; RV64I-NEXT:    slli t4, t4, 8
 ; RV64I-NEXT:    slli t5, t5, 16
 ; RV64I-NEXT:    slli t6, t6, 24
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a6, t2, t1
+; RV64I-NEXT:    or a7, t4, t3
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    lbu t5, 20(a0)
+; RV64I-NEXT:    lbu t6, 21(a0)
+; RV64I-NEXT:    lbu s8, 22(a0)
+; RV64I-NEXT:    lbu s9, 23(a0)
 ; RV64I-NEXT:    slli s1, s1, 8
 ; RV64I-NEXT:    slli s2, s2, 16
 ; RV64I-NEXT:    slli s3, s3, 24
-; RV64I-NEXT:    or a7, t4, t3
-; RV64I-NEXT:    or t0, t6, t5
-; RV64I-NEXT:    or t1, s1, s0
-; RV64I-NEXT:    or t2, s3, s2
-; RV64I-NEXT:    lbu t6, 24(a0)
-; RV64I-NEXT:    lbu s0, 25(a0)
-; RV64I-NEXT:    lbu s1, 26(a0)
-; RV64I-NEXT:    lbu s2, 27(a0)
 ; RV64I-NEXT:    slli s5, s5, 8
 ; RV64I-NEXT:    slli s6, s6, 16
 ; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    slli s9, s9, 8
+; RV64I-NEXT:    or t1, s1, s0
+; RV64I-NEXT:    or t2, s3, s2
 ; RV64I-NEXT:    or t3, s5, s4
 ; RV64I-NEXT:    or t4, s7, s6
-; RV64I-NEXT:    or t5, s9, s8
-; RV64I-NEXT:    lbu s3, 28(a0)
+; RV64I-NEXT:    lbu s0, 24(a0)
+; RV64I-NEXT:    lbu s1, 25(a0)
+; RV64I-NEXT:    lbu s2, 26(a0)
+; RV64I-NEXT:    lbu s3, 27(a0)
+; RV64I-NEXT:    slli t6, t6, 8
+; RV64I-NEXT:    slli s8, s8, 16
+; RV64I-NEXT:    slli s9, s9, 24
+; RV64I-NEXT:    slli s1, s1, 8
+; RV64I-NEXT:    or t5, t6, t5
+; RV64I-NEXT:    or t6, s9, s8
+; RV64I-NEXT:    or s0, s1, s0
+; RV64I-NEXT:    lbu s1, 28(a0)
 ; RV64I-NEXT:    lbu s4, 29(a0)
 ; RV64I-NEXT:    lbu s5, 30(a0)
 ; RV64I-NEXT:    lbu s6, 31(a0)
-; RV64I-NEXT:    slli s10, s10, 16
-; RV64I-NEXT:    slli s11, s11, 24
-; RV64I-NEXT:    slli s0, s0, 8
-; RV64I-NEXT:    slli s1, s1, 16
-; RV64I-NEXT:    slli s2, s2, 24
-; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or a0, s11, s10
-; RV64I-NEXT:    or t6, s0, t6
-; RV64I-NEXT:    or s0, s2, s1
-; RV64I-NEXT:    or s1, s4, s3
-; RV64I-NEXT:    lbu s2, 0(a1)
-; RV64I-NEXT:    lbu s3, 1(a1)
-; RV64I-NEXT:    lbu s4, 2(a1)
-; RV64I-NEXT:    lbu s7, 3(a1)
-; RV64I-NEXT:    slli s5, s5, 16
-; RV64I-NEXT:    slli s6, s6, 24
-; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    or s5, s6, s5
-; RV64I-NEXT:    or s2, s3, s2
-; RV64I-NEXT:    or s3, s7, s4
-; RV64I-NEXT:    lbu s4, 5(a1)
-; RV64I-NEXT:    lbu s6, 4(a1)
-; RV64I-NEXT:    lbu s7, 6(a1)
-; RV64I-NEXT:    lbu a1, 7(a1)
-; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or s4, s4, s6
-; RV64I-NEXT:    slli s7, s7, 16
-; RV64I-NEXT:    slli a1, a1, 24
-; RV64I-NEXT:    or a1, a1, s7
+; RV64I-NEXT:    lbu a0, 0(a1)
 ; RV64I-NEXT:    sd zero, 0(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 24(sp)
-; RV64I-NEXT:    or a5, a6, a5
-; RV64I-NEXT:    addi a6, sp, 32
+; RV64I-NEXT:    slli s2, s2, 16
+; RV64I-NEXT:    slli s3, s3, 24
+; RV64I-NEXT:    or a1, s3, s2
+; RV64I-NEXT:    addi s2, sp, 32
+; RV64I-NEXT:    slli s4, s4, 8
+; RV64I-NEXT:    slli s5, s5, 16
+; RV64I-NEXT:    slli s6, s6, 24
+; RV64I-NEXT:    or s1, s4, s1
+; RV64I-NEXT:    srli s3, a0, 3
+; RV64I-NEXT:    or s4, s6, s5
+; RV64I-NEXT:    andi s5, a0, 63
+; RV64I-NEXT:    andi s3, s3, 24
 ; RV64I-NEXT:    or a3, a4, a3
-; RV64I-NEXT:    or a4, t0, a7
-; RV64I-NEXT:    or a7, t2, t1
-; RV64I-NEXT:    or t0, t4, t3
-; RV64I-NEXT:    or a0, a0, t5
-; RV64I-NEXT:    or t1, s0, t6
-; RV64I-NEXT:    or t2, s5, s1
-; RV64I-NEXT:    or t3, s3, s2
-; RV64I-NEXT:    or a1, a1, s4
-; RV64I-NEXT:    slli a3, a3, 32
-; RV64I-NEXT:    slli a7, a7, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli t2, t2, 32
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    or a3, a3, a5
-; RV64I-NEXT:    or a4, a7, a4
-; RV64I-NEXT:    or a0, a0, t0
-; RV64I-NEXT:    or a5, t2, t1
-; RV64I-NEXT:    or a1, a1, t3
+; RV64I-NEXT:    or a4, a6, a5
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a6, t2, t1
+; RV64I-NEXT:    or a7, t4, t3
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    or a1, a1, s0
+; RV64I-NEXT:    or t1, s4, s1
+; RV64I-NEXT:    sub t2, s2, s3
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    slli a6, a6, 32
+; RV64I-NEXT:    slli t0, t0, 32
+; RV64I-NEXT:    slli t1, t1, 32
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a4, a6, a5
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a1, t1, a1
 ; RV64I-NEXT:    sd a3, 32(sp)
 ; RV64I-NEXT:    sd a4, 40(sp)
-; RV64I-NEXT:    sd a0, 48(sp)
-; RV64I-NEXT:    sd a5, 56(sp)
-; RV64I-NEXT:    srli a0, a1, 3
-; RV64I-NEXT:    andi a3, a1, 63
-; RV64I-NEXT:    andi a0, a0, 24
-; RV64I-NEXT:    sub a0, a6, a0
-; RV64I-NEXT:    ld a4, 0(a0)
-; RV64I-NEXT:    ld a5, 8(a0)
-; RV64I-NEXT:    ld a6, 16(a0)
-; RV64I-NEXT:    ld a0, 24(a0)
-; RV64I-NEXT:    xori a3, a3, 63
-; RV64I-NEXT:    sll a7, a5, a1
-; RV64I-NEXT:    srli t0, a4, 1
-; RV64I-NEXT:    sll t1, a0, a1
-; RV64I-NEXT:    srli a0, a6, 1
-; RV64I-NEXT:    sll a6, a6, a1
-; RV64I-NEXT:    srli a5, a5, 1
-; RV64I-NEXT:    sll a4, a4, a1
-; RV64I-NEXT:    srl a1, t0, a3
-; RV64I-NEXT:    srl t0, a0, a3
-; RV64I-NEXT:    srl a3, a5, a3
-; RV64I-NEXT:    srli a5, a4, 56
-; RV64I-NEXT:    srli t2, a4, 48
-; RV64I-NEXT:    srli t3, a4, 40
-; RV64I-NEXT:    srli t4, a4, 32
-; RV64I-NEXT:    srli t5, a4, 24
-; RV64I-NEXT:    srli t6, a4, 16
-; RV64I-NEXT:    srli s0, a4, 8
-; RV64I-NEXT:    or a0, a7, a1
-; RV64I-NEXT:    or a1, t1, t0
-; RV64I-NEXT:    or a3, a6, a3
-; RV64I-NEXT:    sb t4, 4(a2)
-; RV64I-NEXT:    sb t3, 5(a2)
-; RV64I-NEXT:    sb t2, 6(a2)
-; RV64I-NEXT:    sb a5, 7(a2)
-; RV64I-NEXT:    sb a4, 0(a2)
-; RV64I-NEXT:    sb s0, 1(a2)
-; RV64I-NEXT:    sb t6, 2(a2)
-; RV64I-NEXT:    sb t5, 3(a2)
+; RV64I-NEXT:    sd a5, 48(sp)
+; RV64I-NEXT:    sd a1, 56(sp)
+; RV64I-NEXT:    ld a1, 0(t2)
+; RV64I-NEXT:    ld a3, 8(t2)
+; RV64I-NEXT:    ld a4, 16(t2)
+; RV64I-NEXT:    ld a5, 24(t2)
+; RV64I-NEXT:    xori a6, s5, 63
+; RV64I-NEXT:    sll a7, a3, a0
+; RV64I-NEXT:    srli t0, a1, 1
+; RV64I-NEXT:    sll a5, a5, a0
+; RV64I-NEXT:    srli t1, a4, 1
+; RV64I-NEXT:    sll a4, a4, a0
+; RV64I-NEXT:    srli a3, a3, 1
+; RV64I-NEXT:    sll t2, a1, a0
+; RV64I-NEXT:    srl a0, t0, a6
+; RV64I-NEXT:    srl a1, t1, a6
+; RV64I-NEXT:    srl a3, a3, a6
+; RV64I-NEXT:    srli a6, t2, 56
+; RV64I-NEXT:    srli t0, t2, 48
+; RV64I-NEXT:    srli t1, t2, 40
+; RV64I-NEXT:    srli t3, t2, 32
+; RV64I-NEXT:    srli t4, t2, 24
+; RV64I-NEXT:    srli t5, t2, 16
+; RV64I-NEXT:    srli t6, t2, 8
+; RV64I-NEXT:    or a0, a7, a0
+; RV64I-NEXT:    or a1, a5, a1
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    sb t3, 4(a2)
+; RV64I-NEXT:    sb t1, 5(a2)
+; RV64I-NEXT:    sb t0, 6(a2)
+; RV64I-NEXT:    sb a6, 7(a2)
+; RV64I-NEXT:    sb t2, 0(a2)
+; RV64I-NEXT:    sb t6, 1(a2)
+; RV64I-NEXT:    sb t5, 2(a2)
+; RV64I-NEXT:    sb t4, 3(a2)
 ; RV64I-NEXT:    srli a4, a3, 56
 ; RV64I-NEXT:    srli a5, a3, 48
 ; RV64I-NEXT:    srli a6, a3, 40
@@ -1989,19 +1903,17 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    sb a1, 9(a2)
 ; RV64I-NEXT:    sb a5, 10(a2)
 ; RV64I-NEXT:    sb a3, 11(a2)
-; RV64I-NEXT:    ld s0, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s1, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s2, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s3, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s4, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s5, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s6, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s7, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s8, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s9, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s10, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s11, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 160
+; RV64I-NEXT:    ld s0, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s9, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 144
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: shl_32bytes:
@@ -2026,67 +1938,55 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    lbu a7, 3(a0)
 ; RV32I-NEXT:    lbu a5, 4(a0)
 ; RV32I-NEXT:    lbu t0, 5(a0)
-; RV32I-NEXT:    lbu t1, 6(a0)
-; RV32I-NEXT:    lbu t2, 7(a0)
-; RV32I-NEXT:    lbu t3, 8(a0)
-; RV32I-NEXT:    lbu t4, 9(a0)
-; RV32I-NEXT:    lbu t5, 10(a0)
-; RV32I-NEXT:    lbu t6, 11(a0)
-; RV32I-NEXT:    lbu s0, 12(a0)
-; RV32I-NEXT:    lbu s2, 13(a0)
-; RV32I-NEXT:    lbu s4, 14(a0)
-; RV32I-NEXT:    lbu s5, 15(a0)
-; RV32I-NEXT:    lbu s6, 16(a0)
-; RV32I-NEXT:    lbu s7, 17(a0)
-; RV32I-NEXT:    lbu s8, 18(a0)
-; RV32I-NEXT:    lbu s9, 19(a0)
+; RV32I-NEXT:    lbu t3, 6(a0)
+; RV32I-NEXT:    lbu t6, 7(a0)
+; RV32I-NEXT:    lbu s2, 8(a0)
+; RV32I-NEXT:    lbu s3, 9(a0)
+; RV32I-NEXT:    lbu s4, 10(a0)
+; RV32I-NEXT:    lbu s5, 11(a0)
+; RV32I-NEXT:    lbu s7, 12(a0)
+; RV32I-NEXT:    lbu s8, 13(a0)
+; RV32I-NEXT:    lbu s9, 14(a0)
+; RV32I-NEXT:    lbu s10, 15(a0)
+; RV32I-NEXT:    lbu s11, 16(a0)
+; RV32I-NEXT:    lbu ra, 17(a0)
+; RV32I-NEXT:    lbu t4, 18(a0)
+; RV32I-NEXT:    lbu s0, 19(a0)
 ; RV32I-NEXT:    slli a4, a4, 8
 ; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a7, a7, 24
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    sw a3, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    or a4, a7, a6
-; RV32I-NEXT:    lbu s10, 20(a0)
-; RV32I-NEXT:    lbu s11, 21(a0)
-; RV32I-NEXT:    lbu ra, 22(a0)
-; RV32I-NEXT:    lbu a3, 23(a0)
+; RV32I-NEXT:    lbu t1, 20(a0)
+; RV32I-NEXT:    lbu t2, 21(a0)
+; RV32I-NEXT:    lbu t5, 22(a0)
+; RV32I-NEXT:    lbu s1, 23(a0)
 ; RV32I-NEXT:    slli t0, t0, 8
-; RV32I-NEXT:    slli t1, t1, 16
-; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    slli t4, t4, 8
-; RV32I-NEXT:    slli t5, t5, 16
+; RV32I-NEXT:    slli t3, t3, 16
 ; RV32I-NEXT:    slli t6, t6, 24
-; RV32I-NEXT:    or a5, t0, a5
-; RV32I-NEXT:    or a6, t2, t1
-; RV32I-NEXT:    or a7, t4, t3
-; RV32I-NEXT:    or t0, t6, t5
-; RV32I-NEXT:    lbu s1, 24(a0)
-; RV32I-NEXT:    lbu s3, 25(a0)
-; RV32I-NEXT:    lbu t4, 26(a0)
-; RV32I-NEXT:    lbu t5, 27(a0)
-; RV32I-NEXT:    slli s2, s2, 8
+; RV32I-NEXT:    slli s3, s3, 8
 ; RV32I-NEXT:    slli s4, s4, 16
 ; RV32I-NEXT:    slli s5, s5, 24
-; RV32I-NEXT:    slli s7, s7, 8
-; RV32I-NEXT:    or t1, s2, s0
-; RV32I-NEXT:    or t2, s5, s4
-; RV32I-NEXT:    or t3, s7, s6
-; RV32I-NEXT:    lbu t6, 28(a0)
-; RV32I-NEXT:    lbu s4, 29(a0)
-; RV32I-NEXT:    lbu s5, 30(a0)
-; RV32I-NEXT:    lbu s6, 31(a0)
-; RV32I-NEXT:    slli s8, s8, 16
-; RV32I-NEXT:    slli s9, s9, 24
-; RV32I-NEXT:    slli s11, s11, 8
-; RV32I-NEXT:    slli ra, ra, 16
-; RV32I-NEXT:    slli a3, a3, 24
-; RV32I-NEXT:    or a0, s9, s8
-; RV32I-NEXT:    or s0, s11, s10
-; RV32I-NEXT:    or s2, a3, ra
-; RV32I-NEXT:    lbu a3, 0(a1)
-; RV32I-NEXT:    lbu s7, 1(a1)
-; RV32I-NEXT:    lbu s8, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
+; RV32I-NEXT:    or a5, t0, a5
+; RV32I-NEXT:    or a6, t6, t3
+; RV32I-NEXT:    or a7, s3, s2
+; RV32I-NEXT:    or t0, s5, s4
+; RV32I-NEXT:    lbu t3, 24(a0)
+; RV32I-NEXT:    lbu s5, 25(a0)
+; RV32I-NEXT:    lbu s6, 26(a0)
+; RV32I-NEXT:    lbu t6, 27(a0)
+; RV32I-NEXT:    slli s8, s8, 8
+; RV32I-NEXT:    slli s9, s9, 16
+; RV32I-NEXT:    slli s10, s10, 24
+; RV32I-NEXT:    slli ra, ra, 8
+; RV32I-NEXT:    or s7, s8, s7
+; RV32I-NEXT:    or s2, s10, s9
+; RV32I-NEXT:    or s3, ra, s11
+; RV32I-NEXT:    lbu s4, 28(a0)
+; RV32I-NEXT:    lbu s8, 29(a0)
+; RV32I-NEXT:    lbu s9, 30(a0)
+; RV32I-NEXT:    lbu s10, 31(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
 ; RV32I-NEXT:    sw zero, 24(sp)
 ; RV32I-NEXT:    sw zero, 28(sp)
 ; RV32I-NEXT:    sw zero, 32(sp)
@@ -2095,88 +1995,89 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    sw zero, 12(sp)
 ; RV32I-NEXT:    sw zero, 16(sp)
 ; RV32I-NEXT:    sw zero, 20(sp)
-; RV32I-NEXT:    slli s3, s3, 8
-; RV32I-NEXT:    or s1, s3, s1
-; RV32I-NEXT:    addi s3, sp, 40
 ; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli s4, s4, 8
-; RV32I-NEXT:    slli s5, s5, 16
-; RV32I-NEXT:    slli s6, s6, 24
-; RV32I-NEXT:    slli s7, s7, 8
-; RV32I-NEXT:    slli s8, s8, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or t4, t5, t4
-; RV32I-NEXT:    or t5, s4, t6
-; RV32I-NEXT:    or t6, s6, s5
-; RV32I-NEXT:    or a3, s7, a3
-; RV32I-NEXT:    or a1, a1, s8
-; RV32I-NEXT:    lw s4, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    or a4, a4, s4
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    or a6, t0, a7
-; RV32I-NEXT:    or a7, t2, t1
-; RV32I-NEXT:    or t0, a0, t3
-; RV32I-NEXT:    or t1, s2, s0
-; RV32I-NEXT:    or t2, t4, s1
-; RV32I-NEXT:    or t3, t6, t5
-; RV32I-NEXT:    or a0, a1, a3
-; RV32I-NEXT:    sw t0, 56(sp)
-; RV32I-NEXT:    sw t1, 60(sp)
-; RV32I-NEXT:    sw t2, 64(sp)
-; RV32I-NEXT:    sw t3, 68(sp)
-; RV32I-NEXT:    sw a4, 40(sp)
-; RV32I-NEXT:    sw a5, 44(sp)
-; RV32I-NEXT:    sw a6, 48(sp)
-; RV32I-NEXT:    sw a7, 52(sp)
+; RV32I-NEXT:    slli s0, s0, 24
+; RV32I-NEXT:    or t4, s0, t4
+; RV32I-NEXT:    addi s0, sp, 40
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    slli t5, t5, 16
+; RV32I-NEXT:    slli s1, s1, 24
+; RV32I-NEXT:    slli s5, s5, 8
+; RV32I-NEXT:    slli s6, s6, 16
+; RV32I-NEXT:    slli t6, t6, 24
+; RV32I-NEXT:    slli s8, s8, 8
+; RV32I-NEXT:    slli s9, s9, 16
+; RV32I-NEXT:    slli s10, s10, 24
+; RV32I-NEXT:    or t1, t2, t1
 ; RV32I-NEXT:    srli a1, a0, 3
-; RV32I-NEXT:    andi a3, a0, 31
-; RV32I-NEXT:    andi a4, a1, 28
-; RV32I-NEXT:    xori a1, a3, 31
-; RV32I-NEXT:    sub a3, s3, a4
-; RV32I-NEXT:    lw a4, 0(a3)
-; RV32I-NEXT:    lw a5, 4(a3)
-; RV32I-NEXT:    lw a6, 8(a3)
-; RV32I-NEXT:    lw a7, 12(a3)
-; RV32I-NEXT:    lw t0, 16(a3)
-; RV32I-NEXT:    lw t1, 20(a3)
-; RV32I-NEXT:    lw t2, 24(a3)
-; RV32I-NEXT:    lw a3, 28(a3)
-; RV32I-NEXT:    sll t3, a5, a0
-; RV32I-NEXT:    srli t4, a4, 1
-; RV32I-NEXT:    sll t5, a7, a0
-; RV32I-NEXT:    srli t6, a6, 1
-; RV32I-NEXT:    sll a6, a6, a0
-; RV32I-NEXT:    srli a5, a5, 1
-; RV32I-NEXT:    sll s0, t1, a0
-; RV32I-NEXT:    srli s1, t0, 1
-; RV32I-NEXT:    sll t0, t0, a0
-; RV32I-NEXT:    srli a7, a7, 1
-; RV32I-NEXT:    sll s2, a3, a0
-; RV32I-NEXT:    srli a3, t2, 1
+; RV32I-NEXT:    or t2, s1, t5
+; RV32I-NEXT:    andi t5, a0, 31
+; RV32I-NEXT:    or t3, s5, t3
+; RV32I-NEXT:    or t6, t6, s6
+; RV32I-NEXT:    or s1, s8, s4
+; RV32I-NEXT:    or s4, s10, s9
+; RV32I-NEXT:    andi s5, a1, 28
+; RV32I-NEXT:    xori a1, t5, 31
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    or a4, a6, a5
+; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    or a6, s2, s7
+; RV32I-NEXT:    or a7, t4, s3
+; RV32I-NEXT:    or t0, t2, t1
+; RV32I-NEXT:    or t1, t6, t3
+; RV32I-NEXT:    or t2, s4, s1
+; RV32I-NEXT:    sub t3, s0, s5
+; RV32I-NEXT:    sw a7, 56(sp)
+; RV32I-NEXT:    sw t0, 60(sp)
+; RV32I-NEXT:    sw t1, 64(sp)
+; RV32I-NEXT:    sw t2, 68(sp)
+; RV32I-NEXT:    sw a3, 40(sp)
+; RV32I-NEXT:    sw a4, 44(sp)
+; RV32I-NEXT:    sw a5, 48(sp)
+; RV32I-NEXT:    sw a6, 52(sp)
+; RV32I-NEXT:    lw a3, 0(t3)
+; RV32I-NEXT:    lw a4, 4(t3)
+; RV32I-NEXT:    lw a5, 8(t3)
+; RV32I-NEXT:    lw a6, 12(t3)
+; RV32I-NEXT:    lw a7, 16(t3)
+; RV32I-NEXT:    lw t0, 20(t3)
+; RV32I-NEXT:    lw t1, 24(t3)
+; RV32I-NEXT:    lw t2, 28(t3)
+; RV32I-NEXT:    sll t3, a4, a0
+; RV32I-NEXT:    srli t4, a3, 1
+; RV32I-NEXT:    sll t5, a6, a0
+; RV32I-NEXT:    srli t6, a5, 1
+; RV32I-NEXT:    sll a5, a5, a0
+; RV32I-NEXT:    srli a4, a4, 1
+; RV32I-NEXT:    sll s0, t0, a0
+; RV32I-NEXT:    srli s1, a7, 1
+; RV32I-NEXT:    sll a7, a7, a0
+; RV32I-NEXT:    srli a6, a6, 1
 ; RV32I-NEXT:    sll t2, t2, a0
-; RV32I-NEXT:    srli t1, t1, 1
-; RV32I-NEXT:    sll s3, a4, a0
+; RV32I-NEXT:    srli s2, t1, 1
+; RV32I-NEXT:    sll t1, t1, a0
+; RV32I-NEXT:    srli t0, t0, 1
+; RV32I-NEXT:    sll s3, a3, a0
 ; RV32I-NEXT:    srl a0, t4, a1
-; RV32I-NEXT:    srl a4, t6, a1
-; RV32I-NEXT:    srl a5, a5, a1
+; RV32I-NEXT:    srl a3, t6, a1
+; RV32I-NEXT:    srl a4, a4, a1
 ; RV32I-NEXT:    srl t4, s1, a1
-; RV32I-NEXT:    srl a7, a7, a1
-; RV32I-NEXT:    srl t6, a3, a1
-; RV32I-NEXT:    srl t1, t1, a1
+; RV32I-NEXT:    srl a6, a6, a1
+; RV32I-NEXT:    srl t6, s2, a1
+; RV32I-NEXT:    srl t0, t0, a1
 ; RV32I-NEXT:    srli s1, s3, 24
-; RV32I-NEXT:    srli s4, s3, 16
-; RV32I-NEXT:    srli s5, s3, 8
+; RV32I-NEXT:    srli s2, s3, 16
+; RV32I-NEXT:    srli s4, s3, 8
 ; RV32I-NEXT:    or a0, t3, a0
-; RV32I-NEXT:    or a1, t5, a4
-; RV32I-NEXT:    or a3, a6, a5
+; RV32I-NEXT:    or a1, t5, a3
+; RV32I-NEXT:    or a3, a5, a4
 ; RV32I-NEXT:    or a4, s0, t4
-; RV32I-NEXT:    or a5, t0, a7
-; RV32I-NEXT:    or a6, s2, t6
-; RV32I-NEXT:    or a7, t2, t1
+; RV32I-NEXT:    or a5, a7, a6
+; RV32I-NEXT:    or a6, t2, t6
+; RV32I-NEXT:    or a7, t1, t0
 ; RV32I-NEXT:    sb s3, 0(a2)
-; RV32I-NEXT:    sb s5, 1(a2)
-; RV32I-NEXT:    sb s4, 2(a2)
+; RV32I-NEXT:    sb s4, 1(a2)
+; RV32I-NEXT:    sb s2, 2(a2)
 ; RV32I-NEXT:    sb s1, 3(a2)
 ; RV32I-NEXT:    srli t0, a7, 24
 ; RV32I-NEXT:    srli t1, a7, 16
@@ -2251,19 +2152,17 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-LABEL: ashr_32bytes:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -160
-; RV64I-NEXT:    sd s0, 152(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s1, 144(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s2, 136(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s3, 128(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s4, 120(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s5, 112(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s6, 104(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s7, 96(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s8, 88(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s9, 80(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s10, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s11, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi sp, sp, -144
+; RV64I-NEXT:    sd s0, 136(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 128(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 120(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 112(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s5, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s6, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s7, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s8, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s9, 64(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lbu a3, 0(a0)
 ; RV64I-NEXT:    lbu a4, 1(a0)
 ; RV64I-NEXT:    lbu a5, 2(a0)
@@ -2280,144 +2179,123 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    lbu s1, 13(a0)
 ; RV64I-NEXT:    lbu s2, 14(a0)
 ; RV64I-NEXT:    lbu s3, 15(a0)
+; RV64I-NEXT:    slli a4, a4, 8
+; RV64I-NEXT:    slli a5, a5, 16
+; RV64I-NEXT:    slli a6, a6, 24
+; RV64I-NEXT:    or a3, a4, a3
+; RV64I-NEXT:    or a4, a6, a5
 ; RV64I-NEXT:    lbu s4, 16(a0)
 ; RV64I-NEXT:    lbu s5, 17(a0)
 ; RV64I-NEXT:    lbu s6, 18(a0)
 ; RV64I-NEXT:    lbu s7, 19(a0)
-; RV64I-NEXT:    slli a4, a4, 8
-; RV64I-NEXT:    slli a5, a5, 16
-; RV64I-NEXT:    slli a6, a6, 24
 ; RV64I-NEXT:    slli t0, t0, 8
 ; RV64I-NEXT:    slli t1, t1, 16
 ; RV64I-NEXT:    slli t2, t2, 24
-; RV64I-NEXT:    or a3, a4, a3
-; RV64I-NEXT:    or a4, a6, a5
-; RV64I-NEXT:    or a5, t0, a7
-; RV64I-NEXT:    or a6, t2, t1
-; RV64I-NEXT:    lbu s8, 20(a0)
-; RV64I-NEXT:    lbu s9, 21(a0)
-; RV64I-NEXT:    lbu s10, 22(a0)
-; RV64I-NEXT:    lbu s11, 23(a0)
 ; RV64I-NEXT:    slli t4, t4, 8
 ; RV64I-NEXT:    slli t5, t5, 16
 ; RV64I-NEXT:    slli t6, t6, 24
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a6, t2, t1
+; RV64I-NEXT:    or a7, t4, t3
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    lbu t5, 20(a0)
+; RV64I-NEXT:    lbu t6, 21(a0)
+; RV64I-NEXT:    lbu s8, 22(a0)
+; RV64I-NEXT:    lbu s9, 23(a0)
 ; RV64I-NEXT:    slli s1, s1, 8
 ; RV64I-NEXT:    slli s2, s2, 16
 ; RV64I-NEXT:    slli s3, s3, 24
-; RV64I-NEXT:    or a7, t4, t3
-; RV64I-NEXT:    or t0, t6, t5
-; RV64I-NEXT:    or t1, s1, s0
-; RV64I-NEXT:    or t2, s3, s2
-; RV64I-NEXT:    lbu t6, 24(a0)
-; RV64I-NEXT:    lbu s0, 25(a0)
-; RV64I-NEXT:    lbu s1, 26(a0)
-; RV64I-NEXT:    lbu s2, 27(a0)
 ; RV64I-NEXT:    slli s5, s5, 8
 ; RV64I-NEXT:    slli s6, s6, 16
 ; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    slli s9, s9, 8
+; RV64I-NEXT:    or t1, s1, s0
+; RV64I-NEXT:    or t2, s3, s2
 ; RV64I-NEXT:    or t3, s5, s4
 ; RV64I-NEXT:    or t4, s7, s6
-; RV64I-NEXT:    or t5, s9, s8
-; RV64I-NEXT:    lbu s3, 28(a0)
+; RV64I-NEXT:    lbu s0, 24(a0)
+; RV64I-NEXT:    lbu s1, 25(a0)
+; RV64I-NEXT:    lbu s2, 26(a0)
+; RV64I-NEXT:    lbu s3, 27(a0)
+; RV64I-NEXT:    slli t6, t6, 8
+; RV64I-NEXT:    slli s8, s8, 16
+; RV64I-NEXT:    slli s9, s9, 24
+; RV64I-NEXT:    slli s1, s1, 8
+; RV64I-NEXT:    or t5, t6, t5
+; RV64I-NEXT:    or t6, s9, s8
+; RV64I-NEXT:    or s0, s1, s0
+; RV64I-NEXT:    lbu s1, 28(a0)
 ; RV64I-NEXT:    lbu s4, 29(a0)
 ; RV64I-NEXT:    lbu s5, 30(a0)
 ; RV64I-NEXT:    lbu s6, 31(a0)
-; RV64I-NEXT:    slli s10, s10, 16
-; RV64I-NEXT:    slli s11, s11, 24
-; RV64I-NEXT:    slli s0, s0, 8
-; RV64I-NEXT:    slli s1, s1, 16
-; RV64I-NEXT:    slli s2, s2, 24
+; RV64I-NEXT:    lbu a0, 0(a1)
+; RV64I-NEXT:    slli s2, s2, 16
+; RV64I-NEXT:    slli s3, s3, 24
+; RV64I-NEXT:    or a1, s3, s2
+; RV64I-NEXT:    mv s2, sp
 ; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or a0, s11, s10
-; RV64I-NEXT:    or t6, s0, t6
-; RV64I-NEXT:    or s0, s2, s1
-; RV64I-NEXT:    or s1, s4, s3
-; RV64I-NEXT:    lbu s2, 0(a1)
-; RV64I-NEXT:    lbu s3, 1(a1)
-; RV64I-NEXT:    lbu s4, 2(a1)
-; RV64I-NEXT:    lbu s7, 3(a1)
 ; RV64I-NEXT:    slli s5, s5, 16
 ; RV64I-NEXT:    slli s6, s6, 24
-; RV64I-NEXT:    slli s3, s3, 8
-; RV64I-NEXT:    slli s4, s4, 16
-; RV64I-NEXT:    slli s7, s7, 24
-; RV64I-NEXT:    or s5, s6, s5
-; RV64I-NEXT:    or s2, s3, s2
-; RV64I-NEXT:    or s3, s7, s4
-; RV64I-NEXT:    lbu s4, 5(a1)
-; RV64I-NEXT:    lbu s6, 4(a1)
-; RV64I-NEXT:    lbu s7, 6(a1)
-; RV64I-NEXT:    lbu a1, 7(a1)
-; RV64I-NEXT:    slli s4, s4, 8
-; RV64I-NEXT:    or s4, s4, s6
-; RV64I-NEXT:    slli s7, s7, 16
-; RV64I-NEXT:    slli a1, a1, 24
-; RV64I-NEXT:    or a1, a1, s7
-; RV64I-NEXT:    mv s6, sp
+; RV64I-NEXT:    or s1, s4, s1
+; RV64I-NEXT:    srli s3, a0, 3
+; RV64I-NEXT:    or s4, s6, s5
+; RV64I-NEXT:    andi s5, a0, 63
+; RV64I-NEXT:    andi s3, s3, 24
+; RV64I-NEXT:    xori s5, s5, 63
 ; RV64I-NEXT:    or a3, a4, a3
 ; RV64I-NEXT:    or a4, a6, a5
 ; RV64I-NEXT:    or a5, t0, a7
 ; RV64I-NEXT:    or a6, t2, t1
 ; RV64I-NEXT:    or a7, t4, t3
-; RV64I-NEXT:    or a0, a0, t5
-; RV64I-NEXT:    or t0, s0, t6
-; RV64I-NEXT:    or t1, s5, s1
-; RV64I-NEXT:    or t2, s3, s2
-; RV64I-NEXT:    or a1, a1, s4
+; RV64I-NEXT:    or t0, t6, t5
+; RV64I-NEXT:    or a1, a1, s0
+; RV64I-NEXT:    or t1, s4, s1
+; RV64I-NEXT:    add s2, s2, s3
 ; RV64I-NEXT:    slli a4, a4, 32
 ; RV64I-NEXT:    slli a6, a6, 32
-; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    slli t3, t1, 32
-; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    slli t0, t0, 32
+; RV64I-NEXT:    slli t2, t1, 32
 ; RV64I-NEXT:    sraiw t1, t1, 31
 ; RV64I-NEXT:    or a3, a4, a3
 ; RV64I-NEXT:    or a4, a6, a5
-; RV64I-NEXT:    or a0, a0, a7
-; RV64I-NEXT:    or a5, t3, t0
-; RV64I-NEXT:    or a1, a1, t2
+; RV64I-NEXT:    or a5, t0, a7
+; RV64I-NEXT:    or a1, t2, a1
 ; RV64I-NEXT:    sd t1, 32(sp)
 ; RV64I-NEXT:    sd t1, 40(sp)
 ; RV64I-NEXT:    sd t1, 48(sp)
 ; RV64I-NEXT:    sd t1, 56(sp)
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    sd a4, 8(sp)
-; RV64I-NEXT:    sd a0, 16(sp)
-; RV64I-NEXT:    sd a5, 24(sp)
-; RV64I-NEXT:    srli a0, a1, 3
-; RV64I-NEXT:    andi a3, a1, 63
-; RV64I-NEXT:    andi a0, a0, 24
-; RV64I-NEXT:    xori a3, a3, 63
-; RV64I-NEXT:    add a0, s6, a0
-; RV64I-NEXT:    ld a4, 8(a0)
-; RV64I-NEXT:    ld a5, 16(a0)
-; RV64I-NEXT:    ld a6, 0(a0)
-; RV64I-NEXT:    ld a0, 24(a0)
-; RV64I-NEXT:    srl a7, a4, a1
+; RV64I-NEXT:    sd a5, 16(sp)
+; RV64I-NEXT:    sd a1, 24(sp)
+; RV64I-NEXT:    ld a1, 8(s2)
+; RV64I-NEXT:    ld a3, 16(s2)
+; RV64I-NEXT:    ld a4, 0(s2)
+; RV64I-NEXT:    ld a5, 24(s2)
+; RV64I-NEXT:    srl a6, a1, a0
+; RV64I-NEXT:    slli a7, a3, 1
+; RV64I-NEXT:    srl a4, a4, a0
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srl a3, a3, a0
 ; RV64I-NEXT:    slli t0, a5, 1
-; RV64I-NEXT:    srl a6, a6, a1
-; RV64I-NEXT:    slli a4, a4, 1
-; RV64I-NEXT:    srl a5, a5, a1
-; RV64I-NEXT:    slli t1, a0, 1
-; RV64I-NEXT:    sra t2, a0, a1
-; RV64I-NEXT:    sll a0, t0, a3
-; RV64I-NEXT:    sll a1, a4, a3
-; RV64I-NEXT:    sll a3, t1, a3
-; RV64I-NEXT:    srli a4, t2, 56
-; RV64I-NEXT:    srli t0, t2, 48
-; RV64I-NEXT:    srli t1, t2, 40
-; RV64I-NEXT:    srli t3, t2, 32
-; RV64I-NEXT:    srli t4, t2, 24
-; RV64I-NEXT:    srli t5, t2, 16
-; RV64I-NEXT:    srli t6, t2, 8
-; RV64I-NEXT:    or a0, a7, a0
-; RV64I-NEXT:    or a1, a6, a1
-; RV64I-NEXT:    or a3, a5, a3
+; RV64I-NEXT:    sra a5, a5, a0
+; RV64I-NEXT:    sll a0, a7, s5
+; RV64I-NEXT:    sll a1, a1, s5
+; RV64I-NEXT:    sll a7, t0, s5
+; RV64I-NEXT:    srli t0, a5, 56
+; RV64I-NEXT:    srli t1, a5, 48
+; RV64I-NEXT:    srli t2, a5, 40
+; RV64I-NEXT:    srli t3, a5, 32
+; RV64I-NEXT:    srli t4, a5, 24
+; RV64I-NEXT:    srli t5, a5, 16
+; RV64I-NEXT:    srli t6, a5, 8
+; RV64I-NEXT:    or a0, a6, a0
+; RV64I-NEXT:    or a1, a4, a1
+; RV64I-NEXT:    or a3, a3, a7
 ; RV64I-NEXT:    sb t3, 28(a2)
-; RV64I-NEXT:    sb t1, 29(a2)
-; RV64I-NEXT:    sb t0, 30(a2)
-; RV64I-NEXT:    sb a4, 31(a2)
-; RV64I-NEXT:    sb t2, 24(a2)
+; RV64I-NEXT:    sb t2, 29(a2)
+; RV64I-NEXT:    sb t1, 30(a2)
+; RV64I-NEXT:    sb t0, 31(a2)
+; RV64I-NEXT:    sb a5, 24(a2)
 ; RV64I-NEXT:    sb t6, 25(a2)
 ; RV64I-NEXT:    sb t5, 26(a2)
 ; RV64I-NEXT:    sb t4, 27(a2)
@@ -2438,47 +2316,45 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV64I-NEXT:    srli s3, a0, 56
 ; RV64I-NEXT:    srli s4, a0, 48
 ; RV64I-NEXT:    srli s5, a0, 40
-; RV64I-NEXT:    srli s6, a0, 32
 ; RV64I-NEXT:    sb a7, 20(a2)
 ; RV64I-NEXT:    sb a6, 21(a2)
 ; RV64I-NEXT:    sb a5, 22(a2)
 ; RV64I-NEXT:    sb a4, 23(a2)
-; RV64I-NEXT:    srli a4, a0, 24
+; RV64I-NEXT:    srli a4, a0, 32
 ; RV64I-NEXT:    sb a3, 16(a2)
 ; RV64I-NEXT:    sb t2, 17(a2)
 ; RV64I-NEXT:    sb t1, 18(a2)
 ; RV64I-NEXT:    sb t0, 19(a2)
-; RV64I-NEXT:    srli a3, a0, 16
+; RV64I-NEXT:    srli a3, a0, 24
 ; RV64I-NEXT:    sb t6, 4(a2)
 ; RV64I-NEXT:    sb t5, 5(a2)
 ; RV64I-NEXT:    sb t4, 6(a2)
 ; RV64I-NEXT:    sb t3, 7(a2)
-; RV64I-NEXT:    srli a5, a0, 8
+; RV64I-NEXT:    srli a5, a0, 16
 ; RV64I-NEXT:    sb a1, 0(a2)
 ; RV64I-NEXT:    sb s2, 1(a2)
 ; RV64I-NEXT:    sb s1, 2(a2)
 ; RV64I-NEXT:    sb s0, 3(a2)
-; RV64I-NEXT:    sb s6, 12(a2)
+; RV64I-NEXT:    srli a1, a0, 8
+; RV64I-NEXT:    sb a4, 12(a2)
 ; RV64I-NEXT:    sb s5, 13(a2)
 ; RV64I-NEXT:    sb s4, 14(a2)
 ; RV64I-NEXT:    sb s3, 15(a2)
 ; RV64I-NEXT:    sb a0, 8(a2)
-; RV64I-NEXT:    sb a5, 9(a2)
-; RV64I-NEXT:    sb a3, 10(a2)
-; RV64I-NEXT:    sb a4, 11(a2)
-; RV64I-NEXT:    ld s0, 152(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s1, 144(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s2, 136(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s3, 128(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s4, 120(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s5, 112(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s6, 104(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s7, 96(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s8, 88(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s9, 80(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s10, 72(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s11, 64(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 160
+; RV64I-NEXT:    sb a1, 9(a2)
+; RV64I-NEXT:    sb a5, 10(a2)
+; RV64I-NEXT:    sb a3, 11(a2)
+; RV64I-NEXT:    ld s0, 136(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 128(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 120(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 112(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s5, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s6, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s7, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s8, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s9, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 144
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: ashr_32bytes:
@@ -2503,159 +2379,148 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
 ; RV32I-NEXT:    lbu a7, 3(a0)
 ; RV32I-NEXT:    lbu a5, 4(a0)
 ; RV32I-NEXT:    lbu t0, 5(a0)
-; RV32I-NEXT:    lbu t1, 6(a0)
-; RV32I-NEXT:    lbu t2, 7(a0)
-; RV32I-NEXT:    lbu t3, 8(a0)
-; RV32I-NEXT:    lbu t4, 9(a0)
-; RV32I-NEXT:    lbu t5, 10(a0)
-; RV32I-NEXT:    lbu t6, 11(a0)
-; RV32I-NEXT:    lbu s0, 12(a0)
-; RV32I-NEXT:    lbu s1, 13(a0)
-; RV32I-NEXT:    lbu s2, 14(a0)
-; RV32I-NEXT:    lbu s3, 15(a0)
-; RV32I-NEXT:    lbu s4, 16(a0)
-; RV32I-NEXT:    lbu s5, 17(a0)
-; RV32I-NEXT:    lbu s6, 18(a0)
-; RV32I-NEXT:    lbu s7, 19(a0)
+; RV32I-NEXT:    lbu t3, 6(a0)
+; RV32I-NEXT:    lbu t4, 7(a0)
+; RV32I-NEXT:    lbu t6, 8(a0)
+; RV32I-NEXT:    lbu s0, 9(a0)
+; RV32I-NEXT:    lbu s4, 10(a0)
+; RV32I-NEXT:    lbu s5, 11(a0)
+; RV32I-NEXT:    lbu s6, 12(a0)
+; RV32I-NEXT:    lbu s7, 13(a0)
+; RV32I-NEXT:    lbu s8, 14(a0)
+; RV32I-NEXT:    lbu s9, 15(a0)
+; RV32I-NEXT:    lbu s10, 16(a0)
+; RV32I-NEXT:    lbu s11, 17(a0)
+; RV32I-NEXT:    lbu s2, 18(a0)
+; RV32I-NEXT:    lbu s3, 19(a0)
 ; RV32I-NEXT:    slli a4, a4, 8
 ; RV32I-NEXT:    slli a6, a6, 16
 ; RV32I-NEXT:    slli a7, a7, 24
 ; RV32I-NEXT:    or a3, a4, a3
-; RV32I-NEXT:    sw a3, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    or a4, a7, a6
-; RV32I-NEXT:    lbu s8, 20(a0)
-; RV32I-NEXT:    lbu s9, 21(a0)
-; RV32I-NEXT:    lbu s10, 22(a0)
-; RV32I-NEXT:    lbu s11, 23(a0)
+; RV32I-NEXT:    lbu t1, 20(a0)
+; RV32I-NEXT:    lbu t2, 21(a0)
+; RV32I-NEXT:    lbu t5, 22(a0)
+; RV32I-NEXT:    lbu s1, 23(a0)
 ; RV32I-NEXT:    slli t0, t0, 8
-; RV32I-NEXT:    slli t1, t1, 16
-; RV32I-NEXT:    slli t2, t2, 24
-; RV32I-NEXT:    slli t4, t4, 8
-; RV32I-NEXT:    slli t5, t5, 16
-; RV32I-NEXT:    slli t6, t6, 24
+; RV32I-NEXT:    slli t3, t3, 16
+; RV32I-NEXT:    slli t4, t4, 24
+; RV32I-NEXT:    slli s0, s0, 8
+; RV32I-NEXT:    slli s4, s4, 16
+; RV32I-NEXT:    slli s5, s5, 24
 ; RV32I-NEXT:    or a5, t0, a5
-; RV32I-NEXT:    or a6, t2, t1
-; RV32I-NEXT:    or a7, t4, t3
-; RV32I-NEXT:    or t0, t6, t5
-; RV32I-NEXT:    lbu ra, 24(a0)
-; RV32I-NEXT:    lbu a3, 25(a0)
-; RV32I-NEXT:    lbu t4, 26(a0)
-; RV32I-NEXT:    lbu t5, 27(a0)
-; RV32I-NEXT:    slli s1, s1, 8
+; RV32I-NEXT:    or a6, t4, t3
+; RV32I-NEXT:    or a7, s0, t6
+; RV32I-NEXT:    or t0, s5, s4
+; RV32I-NEXT:    lbu t3, 24(a0)
+; RV32I-NEXT:    lbu s4, 25(a0)
+; RV32I-NEXT:    lbu s5, 26(a0)
+; RV32I-NEXT:    lbu ra, 27(a0)
+; RV32I-NEXT:    slli s7, s7, 8
+; RV32I-NEXT:    slli s8, s8, 16
+; RV32I-NEXT:    slli s9, s9, 24
+; RV32I-NEXT:    slli s11, s11, 8
+; RV32I-NEXT:    or t4, s7, s6
+; RV32I-NEXT:    or t6, s9, s8
+; RV32I-NEXT:    or s0, s11, s10
+; RV32I-NEXT:    lbu s6, 28(a0)
+; RV32I-NEXT:    lbu s7, 29(a0)
+; RV32I-NEXT:    lbu s8, 30(a0)
+; RV32I-NEXT:    lbu s9, 31(a0)
+; RV32I-NEXT:    lbu a0, 0(a1)
 ; RV32I-NEXT:    slli s2, s2, 16
 ; RV32I-NEXT:    slli s3, s3, 24
-; RV32I-NEXT:    slli s5, s5, 8
-; RV32I-NEXT:    or t1, s1, s0
-; RV32I-NEXT:    or t2, s3, s2
-; RV32I-NEXT:    or t3, s5, s4
-; RV32I-NEXT:    lbu t6, 28(a0)
-; RV32I-NEXT:    lbu s0, 29(a0)
-; RV32I-NEXT:    lbu s1, 30(a0)
-; RV32I-NEXT:    lbu a0, 31(a0)
-; RV32I-NEXT:    slli s6, s6, 16
-; RV32I-NEXT:    slli s7, s7, 24
-; RV32I-NEXT:    slli s9, s9, 8
-; RV32I-NEXT:    slli s10, s10, 16
-; RV32I-NEXT:    slli s11, s11, 24
-; RV32I-NEXT:    or s2, s7, s6
-; RV32I-NEXT:    or s3, s9, s8
-; RV32I-NEXT:    or s4, s11, s10
-; RV32I-NEXT:    lbu s5, 0(a1)
-; RV32I-NEXT:    lbu s6, 1(a1)
-; RV32I-NEXT:    lbu s7, 2(a1)
-; RV32I-NEXT:    lbu a1, 3(a1)
-; RV32I-NEXT:    slli a3, a3, 8
-; RV32I-NEXT:    or a3, a3, ra
-; RV32I-NEXT:    addi s8, sp, 8
-; RV32I-NEXT:    slli t4, t4, 16
-; RV32I-NEXT:    slli t5, t5, 24
-; RV32I-NEXT:    slli s0, s0, 8
-; RV32I-NEXT:    slli s1, s1, 16
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    slli s6, s6, 8
-; RV32I-NEXT:    slli s7, s7, 16
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or t4, t5, t4
-; RV32I-NEXT:    or t5, s0, t6
-; RV32I-NEXT:    or s1, a0, s1
-; RV32I-NEXT:    or t6, s6, s5
-; RV32I-NEXT:    or a1, a1, s7
-; RV32I-NEXT:    srai s0, a0, 31
-; RV32I-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    or a4, a4, a0
-; RV32I-NEXT:    or a5, a6, a5
-; RV32I-NEXT:    or a6, t0, a7
-; RV32I-NEXT:    or a7, t2, t1
-; RV32I-NEXT:    or t0, s2, t3
-; RV32I-NEXT:    or t1, s4, s3
-; RV32I-NEXT:    or a3, t4, a3
+; RV32I-NEXT:    or s2, s3, s2
+; RV32I-NEXT:    addi s3, sp, 8
+; RV32I-NEXT:    slli t2, t2, 8
+; RV32I-NEXT:    slli t5, t5, 16
+; RV32I-NEXT:    slli s1, s1, 24
+; RV32I-NEXT:    slli s4, s4, 8
+; RV32I-NEXT:    slli s5, s5, 16
+; RV32I-NEXT:    slli ra, ra, 24
+; RV32I-NEXT:    slli s7, s7, 8
+; RV32I-NEXT:    slli s8, s8, 16
+; RV32I-NEXT:    slli s9, s9, 24
+; RV32I-NEXT:    or t1, t2, t1
+; RV32I-NEXT:    srli a1, a0, 3
 ; RV32I-NEXT:    or t2, s1, t5
-; RV32I-NEXT:    or a0, a1, t6
-; RV32I-NEXT:    sw s0, 56(sp)
-; RV32I-NEXT:    sw s0, 60(sp)
-; RV32I-NEXT:    sw s0, 64(sp)
-; RV32I-NEXT:    sw s0, 68(sp)
-; RV32I-NEXT:    sw s0, 40(sp)
-; RV32I-NEXT:    sw s0, 44(sp)
-; RV32I-NEXT:    sw s0, 48(sp)
-; RV32I-NEXT:    sw s0, 52(sp)
-; RV32I-NEXT:    sw t0, 24(sp)
-; RV32I-NEXT:    sw t1, 28(sp)
-; RV32I-NEXT:    sw a3, 32(sp)
+; RV32I-NEXT:    andi t5, a0, 31
+; RV32I-NEXT:    or t3, s4, t3
+; RV32I-NEXT:    or s1, ra, s5
+; RV32I-NEXT:    or s4, s7, s6
+; RV32I-NEXT:    or s5, s9, s8
+; RV32I-NEXT:    srai s6, s9, 31
+; RV32I-NEXT:    andi s7, a1, 28
+; RV32I-NEXT:    xori a1, t5, 31
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    or a4, a6, a5
+; RV32I-NEXT:    or a5, t0, a7
+; RV32I-NEXT:    or a6, t6, t4
+; RV32I-NEXT:    or a7, s2, s0
+; RV32I-NEXT:    or t0, t2, t1
+; RV32I-NEXT:    or t1, s1, t3
+; RV32I-NEXT:    or t2, s5, s4
+; RV32I-NEXT:    sw s6, 56(sp)
+; RV32I-NEXT:    sw s6, 60(sp)
+; RV32I-NEXT:    sw s6, 64(sp)
+; RV32I-NEXT:    sw s6, 68(sp)
+; RV32I-NEXT:    sw s6, 40(sp)
+; RV32I-NEXT:    sw s6, 44(sp)
+; RV32I-NEXT:    sw s6, 48(sp)
+; RV32I-NEXT:    sw s6, 52(sp)
+; RV32I-NEXT:    add s3, s3, s7
+; RV32I-NEXT:    sw a7, 24(sp)
+; RV32I-NEXT:    sw t0, 28(sp)
+; RV32I-NEXT:    sw t1, 32(sp)
 ; RV32I-NEXT:    sw t2, 36(sp)
-; RV32I-NEXT:    sw a4, 8(sp)
-; RV32I-NEXT:    sw a5, 12(sp)
-; RV32I-NEXT:    sw a6, 16(sp)
-; RV32I-NEXT:    sw a7, 20(sp)
-; RV32I-NEXT:    srli a1, a0, 3
-; RV32I-NEXT:    andi a3, a0, 31
-; RV32I-NEXT:    andi a4, a1, 28
-; RV32I-NEXT:    xori a1, a3, 31
-; RV32I-NEXT:    add a4, s8, a4
-; RV32I-NEXT:    lw a3, 0(a4)
-; RV32I-NEXT:    lw a5, 4(a4)
-; RV32I-NEXT:    lw a6, 8(a4)
-; RV32I-NEXT:    lw a7, 12(a4)
-; RV32I-NEXT:    lw t0, 16(a4)
-; RV32I-NEXT:    lw t1, 20(a4)
-; RV32I-NEXT:    lw t2, 24(a4)
-; RV32I-NEXT:    lw a4, 28(a4)
-; RV32I-NEXT:    srl t3, a5, a0
-; RV32I-NEXT:    slli t4, a6, 1
+; RV32I-NEXT:    sw a3, 8(sp)
+; RV32I-NEXT:    sw a4, 12(sp)
+; RV32I-NEXT:    sw a5, 16(sp)
+; RV32I-NEXT:    sw a6, 20(sp)
+; RV32I-NEXT:    lw a3, 0(s3)
+; RV32I-NEXT:    lw a4, 4(s3)
+; RV32I-NEXT:    lw a5, 8(s3)
+; RV32I-NEXT:    lw a6, 12(s3)
+; RV32I-NEXT:    lw a7, 16(s3)
+; RV32I-NEXT:    lw t0, 20(s3)
+; RV32I-NEXT:    lw t1, 24(s3)
+; RV32I-NEXT:    lw t2, 28(s3)
+; RV32I-NEXT:    srl t3, a4, a0
+; RV32I-NEXT:    slli t4, a5, 1
 ; RV32I-NEXT:    srl a3, a3, a0
-; RV32I-NEXT:    slli a5, a5, 1
-; RV32I-NEXT:    srl t5, a7, a0
-; RV32I-NEXT:    slli t6, t0, 1
-; RV32I-NEXT:    srl a6, a6, a0
-; RV32I-NEXT:    slli a7, a7, 1
-; RV32I-NEXT:    srl s0, t1, a0
-; RV32I-NEXT:    slli s1, t2, 1
-; RV32I-NEXT:    srl t0, t0, a0
-; RV32I-NEXT:    slli t1, t1, 1
-; RV32I-NEXT:    srl t2, t2, a0
-; RV32I-NEXT:    slli s2, a4, 1
-; RV32I-NEXT:    sra s3, a4, a0
+; RV32I-NEXT:    slli a4, a4, 1
+; RV32I-NEXT:    srl t5, a6, a0
+; RV32I-NEXT:    slli t6, a7, 1
+; RV32I-NEXT:    srl a5, a5, a0
+; RV32I-NEXT:    slli a6, a6, 1
+; RV32I-NEXT:    srl s0, t0, a0
+; RV32I-NEXT:    slli s1, t1, 1
+; RV32I-NEXT:    srl a7, a7, a0
+; RV32I-NEXT:    slli t0, t0, 1
+; RV32I-NEXT:    srl t1, t1, a0
+; RV32I-NEXT:    slli s2, t2, 1
+; RV32I-NEXT:    sra t2, t2, a0
 ; RV32I-NEXT:    sll a0, t4, a1
-; RV32I-NEXT:    sll a4, a5, a1
-; RV32I-NEXT:    sll a5, t6, a1
-; RV32I-NEXT:    sll a7, a7, a1
-; RV32I-NEXT:    sll t4, s1, a1
-; RV32I-NEXT:    sll t1, t1, a1
-; RV32I-NEXT:    sll t6, s2, a1
-; RV32I-NEXT:    srli s1, s3, 24
-; RV32I-NEXT:    srli s2, s3, 16
-; RV32I-NEXT:    srli s4, s3, 8
+; RV32I-NEXT:    sll a4, a4, a1
+; RV32I-NEXT:    sll t4, t6, a1
+; RV32I-NEXT:    sll a6, a6, a1
+; RV32I-NEXT:    sll t6, s1, a1
+; RV32I-NEXT:    sll t0, t0, a1
+; RV32I-NEXT:    sll s1, s2, a1
+; RV32I-NEXT:    srli s2, t2, 24
+; RV32I-NEXT:    srli s3, t2, 16
+; RV32I-NEXT:    srli s4, t2, 8
 ; RV32I-NEXT:    or a0, t3, a0
 ; RV32I-NEXT:    or a1, a3, a4
-; RV32I-NEXT:    or a3, t5, a5
-; RV32I-NEXT:    or a4, a6, a7
-; RV32I-NEXT:    or a5, s0, t4
-; RV32I-NEXT:    or a6, t0, t1
-; RV32I-NEXT:    or a7, t2, t6
-; RV32I-NEXT:    sb s3, 28(a2)
+; RV32I-NEXT:    or a3, t5, t4
+; RV32I-NEXT:    or a4, a5, a6
+; RV32I-NEXT:    or a5, s0, t6
+; RV32I-NEXT:    or a6, a7, t0
+; RV32I-NEXT:    or a7, t1, s1
+; RV32I-NEXT:    sb t2, 28(a2)
 ; RV32I-NEXT:    sb s4, 29(a2)
-; RV32I-NEXT:    sb s2, 30(a2)
-; RV32I-NEXT:    sb s1, 31(a2)
+; RV32I-NEXT:    sb s3, 30(a2)
+; RV32I-NEXT:    sb s2, 31(a2)
 ; RV32I-NEXT:    srli t0, a7, 24
 ; RV32I-NEXT:    srli t1, a7, 16
 ; RV32I-NEXT:    srli t2, a7, 8
diff --git a/llvm/test/CodeGen/SystemZ/pr60413.ll b/llvm/test/CodeGen/SystemZ/pr60413.ll
index 8a6a30318ae58..bbf4d50bd716d 100644
--- a/llvm/test/CodeGen/SystemZ/pr60413.ll
+++ b/llvm/test/CodeGen/SystemZ/pr60413.ll
@@ -16,31 +16,31 @@ define dso_local void @m() local_unnamed_addr #1 {
 ; CHECK-NEXT:    stmg %r13, %r15, 104(%r15)
 ; CHECK-NEXT:    aghi %r15, -168
 ; CHECK-NEXT:    lhrl %r1, f+4
-; CHECK-NEXT:    sll %r1, 8
 ; CHECK-NEXT:    larl %r2, f
-; CHECK-NEXT:    ic %r1, 6(%r2)
-; CHECK-NEXT:    larl %r2, e
-; CHECK-NEXT:    lb %r0, 3(%r2)
-; CHECK-NEXT:    vlvgp %v0, %r0, %r1
-; CHECK-NEXT:    vlvgp %v1, %r1, %r0
-; CHECK-NEXT:    vlvgf %v1, %r1, 0
-; CHECK-NEXT:    vlvgf %v1, %r1, 2
-; CHECK-NEXT:    vlvgp %v2, %r1, %r1
-; CHECK-NEXT:    # kill: def $r1l killed $r1l killed $r1d
+; CHECK-NEXT:    llc %r2, 6(%r2)
+; CHECK-NEXT:    larl %r3, e
+; CHECK-NEXT:    lb %r0, 3(%r3)
+; CHECK-NEXT:    rosbg %r2, %r1, 32, 55, 8
+; CHECK-NEXT:    vlvgp %v0, %r2, %r0
+; CHECK-NEXT:    vlvgf %v0, %r2, 0
+; CHECK-NEXT:    vlvgf %v0, %r2, 2
+; CHECK-NEXT:    vlvgp %v1, %r0, %r2
+; CHECK-NEXT:    vlvgp %v2, %r2, %r2
+; CHECK-NEXT:    lr %r1, %r2
 ; CHECK-NEXT:    nilh %r1, 255
 ; CHECK-NEXT:    chi %r1, 128
 ; CHECK-NEXT:    ipm %r1
 ; CHECK-NEXT:    risbg %r1, %r1, 63, 191, 36
-; CHECK-NEXT:    vlvgf %v0, %r0, 0
-; CHECK-NEXT:    vlvgf %v0, %r0, 2
 ; CHECK-NEXT:    vgbm %v3, 30583
 ; CHECK-NEXT:    vn %v0, %v0, %v3
+; CHECK-NEXT:    vlvgf %v1, %r0, 0
+; CHECK-NEXT:    vlvgf %v1, %r0, 2
 ; CHECK-NEXT:    vn %v1, %v1, %v3
 ; CHECK-NEXT:    vrepf %v2, %v2, 1
 ; CHECK-NEXT:    vn %v2, %v2, %v3
 ; CHECK-NEXT:    vrepif %v3, 127
-; CHECK-NEXT:    vchlf %v1, %v1, %v3
-; CHECK-NEXT:    vlgvf %r13, %v1, 0
+; CHECK-NEXT:    vchlf %v0, %v0, %v3
+; CHECK-NEXT:    vlgvf %r13, %v0, 0
 ; CHECK-NEXT:    vchlf %v2, %v2, %v3
 ; CHECK-NEXT:    vlgvf %r3, %v2, 1
 ; CHECK-NEXT:    nilf %r3, 1
@@ -54,13 +54,13 @@ define dso_local void @m() local_unnamed_addr #1 {
 ; CHECK-NEXT:    nilf %r14, 1
 ; CHECK-NEXT:    rosbg %r2, %r14, 32, 51, 12
 ; CHECK-NEXT:    rosbg %r2, %r13, 52, 52, 11
-; CHECK-NEXT:    vlgvf %r13, %v1, 1
+; CHECK-NEXT:    vlgvf %r13, %v0, 1
 ; CHECK-NEXT:    rosbg %r2, %r13, 53, 53, 10
-; CHECK-NEXT:    vlgvf %r13, %v1, 2
+; CHECK-NEXT:    vlgvf %r13, %v0, 2
 ; CHECK-NEXT:    rosbg %r2, %r13, 54, 54, 9
-; CHECK-NEXT:    vlgvf %r13, %v1, 3
+; CHECK-NEXT:    vlgvf %r13, %v0, 3
 ; CHECK-NEXT:    rosbg %r2, %r13, 55, 55, 8
-; CHECK-NEXT:    vchlf %v0, %v0, %v3
+; CHECK-NEXT:    vchlf %v0, %v1, %v3
 ; CHECK-NEXT:    vlgvf %r13, %v0, 0
 ; CHECK-NEXT:    rosbg %r2, %r13, 56, 56, 7
 ; CHECK-NEXT:    vlgvf %r13, %v0, 1
diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll
index d9064c684cb20..2911edfbfd409 100644
--- a/llvm/test/CodeGen/X86/abds-neg.ll
+++ b/llvm/test/CodeGen/X86/abds-neg.ll
@@ -1076,15 +1076,15 @@ define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %esi
 ; X86-NEXT:    xorl %edx, %ecx
+; X86-NEXT:    xorl %edx, %esi
 ; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %ecx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -1107,15 +1107,15 @@ define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %esi
 ; X86-NEXT:    xorl %edx, %ecx
+; X86-NEXT:    xorl %edx, %esi
 ; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %ecx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -1142,32 +1142,32 @@ define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $16, %esp
-; X86-NEXT:    movl 32(%ebp), %ecx
 ; X86-NEXT:    movl 36(%ebp), %eax
-; X86-NEXT:    movl 24(%ebp), %edi
+; X86-NEXT:    movl 32(%ebp), %ecx
 ; X86-NEXT:    movl 28(%ebp), %edx
-; X86-NEXT:    subl 40(%ebp), %edi
+; X86-NEXT:    movl 24(%ebp), %esi
+; X86-NEXT:    subl 40(%ebp), %esi
 ; X86-NEXT:    sbbl 44(%ebp), %edx
 ; X86-NEXT:    sbbl 48(%ebp), %ecx
 ; X86-NEXT:    sbbl 52(%ebp), %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    xorl %esi, %ecx
-; X86-NEXT:    xorl %esi, %edx
-; X86-NEXT:    xorl %esi, %edi
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    subl %edi, %ebx
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sbbl %edx, %edi
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    xorl %edi, %eax
+; X86-NEXT:    xorl %edi, %ecx
+; X86-NEXT:    xorl %edi, %edx
+; X86-NEXT:    xorl %edi, %esi
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    subl %esi, %ebx
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %edx, %esi
+; X86-NEXT:    movl %edi, %edx
 ; X86-NEXT:    sbbl %ecx, %edx
-; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    sbbl %eax, %edi
 ; X86-NEXT:    movl 8(%ebp), %eax
 ; X86-NEXT:    movl %ebx, (%eax)
-; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
 ; X86-NEXT:    movl %edx, 8(%eax)
-; X86-NEXT:    movl %esi, 12(%eax)
+; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    leal -12(%ebp), %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
@@ -1203,32 +1203,32 @@ define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $16, %esp
-; X86-NEXT:    movl 32(%ebp), %ecx
 ; X86-NEXT:    movl 36(%ebp), %eax
-; X86-NEXT:    movl 24(%ebp), %edi
+; X86-NEXT:    movl 32(%ebp), %ecx
 ; X86-NEXT:    movl 28(%ebp), %edx
-; X86-NEXT:    subl 40(%ebp), %edi
+; X86-NEXT:    movl 24(%ebp), %esi
+; X86-NEXT:    subl 40(%ebp), %esi
 ; X86-NEXT:    sbbl 44(%ebp), %edx
 ; X86-NEXT:    sbbl 48(%ebp), %ecx
 ; X86-NEXT:    sbbl 52(%ebp), %eax
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    sarl $31, %esi
-; X86-NEXT:    xorl %esi, %eax
-; X86-NEXT:    xorl %esi, %ecx
-; X86-NEXT:    xorl %esi, %edx
-; X86-NEXT:    xorl %esi, %edi
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    subl %edi, %ebx
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sbbl %edx, %edi
-; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    sarl $31, %edi
+; X86-NEXT:    xorl %edi, %eax
+; X86-NEXT:    xorl %edi, %ecx
+; X86-NEXT:    xorl %edi, %edx
+; X86-NEXT:    xorl %edi, %esi
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    subl %esi, %ebx
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %edx, %esi
+; X86-NEXT:    movl %edi, %edx
 ; X86-NEXT:    sbbl %ecx, %edx
-; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    sbbl %eax, %edi
 ; X86-NEXT:    movl 8(%ebp), %eax
 ; X86-NEXT:    movl %ebx, (%eax)
-; X86-NEXT:    movl %edi, 4(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
 ; X86-NEXT:    movl %edx, 8(%eax)
-; X86-NEXT:    movl %esi, 12(%eax)
+; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    leal -12(%ebp), %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 0de308a9e0738..9be816655072c 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -1734,20 +1734,20 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; SSE2-LABEL: not_avg_v16i8_wide_constants:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movaps (%rdi), %xmm1
-; SSE2-NEXT:    movdqa (%rsi), %xmm0
+; SSE2-NEXT:    movdqa (%rsi), %xmm2
 ; SSE2-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
-; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    movd %eax, %xmm0
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
-; SSE2-NEXT:    movd %eax, %xmm3
+; SSE2-NEXT:    movd %eax, %xmm4
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
-; SSE2-NEXT:    movd %eax, %xmm4
+; SSE2-NEXT:    movd %eax, %xmm3
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
 ; SSE2-NEXT:    movd %eax, %xmm5
@@ -1762,9 +1762,6 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; SSE2-NEXT:    movd %eax, %xmm8
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
-; SSE2-NEXT:    movd %eax, %xmm10
-; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT:    decl %eax
 ; SSE2-NEXT:    movd %eax, %xmm9
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
@@ -1774,6 +1771,9 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; SSE2-NEXT:    movd %eax, %xmm12
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
+; SSE2-NEXT:    movd %eax, %xmm10
+; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT:    decl %eax
 ; SSE2-NEXT:    movd %eax, %xmm13
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
@@ -1783,45 +1783,43 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; SSE2-NEXT:    movd %eax, %xmm15
 ; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
 ; SSE2-NEXT:    decl %eax
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT:    movd %eax, %xmm2
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[0,0,0,0]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm6[0,0,0,0]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm8[0,0,0,0]
-; SSE2-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
 ; SSE2-NEXT:    movapd %xmm4, %xmm5
 ; SSE2-NEXT:    andpd %xmm1, %xmm5
 ; SSE2-NEXT:    xorpd %xmm4, %xmm1
 ; SSE2-NEXT:    psrlw $1, %xmm1
 ; SSE2-NEXT:    paddw %xmm5, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm12[0,0,0,0]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm14[0,0,0,0]
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE2-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT:    movsd {{.*#+}} xmm2 = xmm9[0],xmm2[1]
-; SSE2-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
-; SSE2-NEXT:    movapd %xmm2, %xmm3
-; SSE2-NEXT:    andpd %xmm0, %xmm3
-; SSE2-NEXT:    xorpd %xmm2, %xmm0
-; SSE2-NEXT:    psrlw $1, %xmm0
-; SSE2-NEXT:    paddw %xmm3, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm1
-; SSE2-NEXT:    packuswb %xmm0, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm10[0],xmm0[1]
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; SSE2-NEXT:    movapd %xmm0, %xmm3
+; SSE2-NEXT:    andpd %xmm2, %xmm3
+; SSE2-NEXT:    xorpd %xmm0, %xmm2
+; SSE2-NEXT:    psrlw $1, %xmm2
+; SSE2-NEXT:    paddw %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    packuswb %xmm2, %xmm1
 ; SSE2-NEXT:    movdqu %xmm1, (%rax)
 ; SSE2-NEXT:    retq
 ;
@@ -1831,75 +1829,71 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT:    vpextrw $7, %xmm3, %edx
-; AVX1-NEXT:    vpextrw $6, %xmm3, %ecx
-; AVX1-NEXT:    vpextrw $5, %xmm3, %eax
+; AVX1-NEXT:    vpextrw $3, %xmm3, %edx
+; AVX1-NEXT:    vpextrw $2, %xmm3, %ecx
+; AVX1-NEXT:    vpextrw $1, %xmm3, %eax
 ; AVX1-NEXT:    decl %edx
 ; AVX1-NEXT:    vmovd %edx, %xmm4
-; AVX1-NEXT:    vpextrw $4, %xmm3, %edx
+; AVX1-NEXT:    vpextrw $0, %xmm3, %edx
 ; AVX1-NEXT:    decl %ecx
 ; AVX1-NEXT:    vmovd %ecx, %xmm5
-; AVX1-NEXT:    vpextrw $1, %xmm3, %ecx
+; AVX1-NEXT:    vpextrw $3, %xmm2, %ecx
 ; AVX1-NEXT:    decl %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm6
-; AVX1-NEXT:    vpextrw $0, %xmm3, %eax
+; AVX1-NEXT:    vpextrw $2, %xmm2, %eax
 ; AVX1-NEXT:    decl %edx
 ; AVX1-NEXT:    vmovd %edx, %xmm7
-; AVX1-NEXT:    vpextrw $3, %xmm3, %edx
-; AVX1-NEXT:    decq %rcx
-; AVX1-NEXT:    vmovq %rcx, %xmm8
-; AVX1-NEXT:    vpextrw $2, %xmm3, %ecx
-; AVX1-NEXT:    decq %rax
-; AVX1-NEXT:    vmovq %rax, %xmm3
-; AVX1-NEXT:    vpextrw $7, %xmm2, %eax
+; AVX1-NEXT:    vpextrw $1, %xmm2, %edx
+; AVX1-NEXT:    decl %ecx
+; AVX1-NEXT:    vmovd %ecx, %xmm8
+; AVX1-NEXT:    vpextrw $0, %xmm2, %ecx
+; AVX1-NEXT:    decl %eax
+; AVX1-NEXT:    vmovd %eax, %xmm9
+; AVX1-NEXT:    vpextrw $7, %xmm3, %eax
 ; AVX1-NEXT:    decl %edx
-; AVX1-NEXT:    vmovd %edx, %xmm9
-; AVX1-NEXT:    vpextrw $6, %xmm2, %edx
+; AVX1-NEXT:    vmovd %edx, %xmm10
+; AVX1-NEXT:    vpextrw $6, %xmm3, %edx
 ; AVX1-NEXT:    decl %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm10
-; AVX1-NEXT:    vpextrw $5, %xmm2, %ecx
+; AVX1-NEXT:    vmovd %ecx, %xmm11
+; AVX1-NEXT:    vpextrw $7, %xmm2, %ecx
 ; AVX1-NEXT:    decl %eax
-; AVX1-NEXT:    vmovd %eax, %xmm11
-; AVX1-NEXT:    vpextrw $4, %xmm2, %eax
+; AVX1-NEXT:    vmovd %eax, %xmm12
+; AVX1-NEXT:    vpextrw $6, %xmm2, %eax
 ; AVX1-NEXT:    decl %edx
-; AVX1-NEXT:    vmovd %edx, %xmm12
-; AVX1-NEXT:    vpextrw $1, %xmm2, %edx
+; AVX1-NEXT:    vmovd %edx, %xmm13
+; AVX1-NEXT:    vpextrw $5, %xmm3, %edx
 ; AVX1-NEXT:    decl %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm13
-; AVX1-NEXT:    vpextrw $0, %xmm2, %ecx
+; AVX1-NEXT:    vmovd %ecx, %xmm14
+; AVX1-NEXT:    vpextrw $4, %xmm3, %ecx
 ; AVX1-NEXT:    decl %eax
-; AVX1-NEXT:    vmovd %eax, %xmm14
-; AVX1-NEXT:    vpextrw $3, %xmm2, %eax
-; AVX1-NEXT:    decq %rdx
-; AVX1-NEXT:    vmovq %rdx, %xmm15
-; AVX1-NEXT:    vpextrw $2, %xmm2, %edx
-; AVX1-NEXT:    decq %rcx
-; AVX1-NEXT:    vmovq %rcx, %xmm2
+; AVX1-NEXT:    vmovd %eax, %xmm3
+; AVX1-NEXT:    vpextrw $5, %xmm2, %eax
+; AVX1-NEXT:    decl %edx
+; AVX1-NEXT:    vmovd %edx, %xmm15
+; AVX1-NEXT:    vpextrw $4, %xmm2, %edx
+; AVX1-NEXT:    decl %ecx
+; AVX1-NEXT:    vmovd %ecx, %xmm2
 ; AVX1-NEXT:    decl %eax
 ; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
 ; AVX1-NEXT:    vmovd %eax, %xmm5
 ; AVX1-NEXT:    decl %edx
 ; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
 ; AVX1-NEXT:    vmovd %edx, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
 ; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; AVX1-NEXT:    vshufps {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
 ; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT:    vmovddup {{.*#+}} ymm2 = ymm2[0,0,2,2]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm1
 ; AVX1-NEXT:    vxorps %ymm0, %ymm2, %ymm0
diff --git a/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll b/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll
index c22a394e6c4e0..1e2cf4956bd08 100644
--- a/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll
+++ b/llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll
@@ -94,8 +94,8 @@ define <32 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_512(<32 x bfloat> %src,
 ;
 ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_512:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vsubbf16 %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xc9,0x5c,0xc2]
 ; X86-NEXT:    vsubbf16 (%eax), %zmm1, %zmm1 # encoding: [0x62,0xf5,0x75,0x48,0x5c,0x08]
 ; X86-NEXT:    vsubbf16 %zmm1, %zmm0, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x5c,0xc1]
diff --git a/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll b/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll
index 435f67a0f1e4b..42831a453cb1d 100644
--- a/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll
+++ b/llvm/test/CodeGen/X86/avx10_2bf16-arith.ll
@@ -147,8 +147,8 @@ define <16 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_256(<16 x bfloat> %src,
 ;
 ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_256:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2]
 ; X86-NEXT:    vsubbf16 (%eax), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x08]
 ; X86-NEXT:    vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1]
@@ -201,8 +201,8 @@ define <8 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_128(<8 x bfloat> %src, <8
 ;
 ; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_128:
 ; X86:       # %bb.0:
-; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
+; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
 ; X86-NEXT:    vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2]
 ; X86-NEXT:    vsubbf16 (%eax), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x08]
 ; X86-NEXT:    vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1]
diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index 962ffe47d0d51..f2148545f04de 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -203,13 +203,12 @@ define <4 x i32> @freeze_add_vec_undef(<4 x i32> %a0) nounwind {
 ; X86-LABEL: freeze_add_vec_undef:
 ; X86:       # %bb.0:
 ; X86-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_add_vec_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
+; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %x = add <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 undef>
   %y = freeze <4 x i32> %x
@@ -274,13 +273,12 @@ define <4 x i32> @freeze_sub_vec_undef(<4 x i32> %a0) nounwind {
 ; X86-LABEL: freeze_sub_vec_undef:
 ; X86:       # %bb.0:
 ; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_sub_vec_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpsubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
+; X64-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %x = sub <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 undef>
   %y = freeze <4 x i32> %x
@@ -345,14 +343,12 @@ define <8 x i16> @freeze_mul_vec(<8 x i16> %a0) nounwind {
 define <8 x i16> @freeze_mul_vec_undef(<8 x i16> %a0) nounwind {
 ; X86-LABEL: freeze_mul_vec_undef:
 ; X86:       # %bb.0:
-; X86-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [1,2,3,4,4,3,0,1]
-; X86-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,3,2,1,1,2,u,4]
+; X86-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,6,6,4,4,6,0,4]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_mul_vec_undef:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1,2,3,4,4,3,0,1]
-; X64-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,3,2,1,1,2,u,4]
+; X64-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,6,6,4,4,6,0,4]
 ; X64-NEXT:    retq
   %x = mul <8 x i16> %a0, <i16 1, i16 2, i16 3, i16 4, i16 4, i16 3, i16 undef, i16 1>
   %y = freeze <8 x i16> %x
diff --git a/llvm/test/CodeGen/X86/freeze-vector.ll b/llvm/test/CodeGen/X86/freeze-vector.ll
index 15b43c41b9945..40229017bd0d2 100644
--- a/llvm/test/CodeGen/X86/freeze-vector.ll
+++ b/llvm/test/CodeGen/X86/freeze-vector.ll
@@ -171,15 +171,15 @@ define void @freeze_extractelement(ptr %origin0, ptr %origin1, ptr %dst) nounwin
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    vmovdqa (%edx), %xmm0
-; X86-NEXT:    vpand (%ecx), %xmm0, %xmm0
+; X86-NEXT:    vmovdqa (%ecx), %xmm0
+; X86-NEXT:    vpand (%edx), %xmm0, %xmm0
 ; X86-NEXT:    vpextrb $6, %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_extractelement:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa (%rdi), %xmm0
-; X64-NEXT:    vpand (%rsi), %xmm0, %xmm0
+; X64-NEXT:    vmovdqa (%rsi), %xmm0
+; X64-NEXT:    vpand (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    vpextrb $6, %xmm0, (%rdx)
 ; X64-NEXT:    retq
   %i0 = load <16 x i8>, ptr %origin0
@@ -198,8 +198,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst,
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    vmovdqa (%esi), %xmm0
-; X86-NEXT:    vpand (%edx), %xmm0, %xmm0
+; X86-NEXT:    vmovdqa (%edx), %xmm0
+; X86-NEXT:    vpand (%esi), %xmm0, %xmm0
 ; X86-NEXT:    vmovdqa %xmm0, (%ecx)
 ; X86-NEXT:    vpextrb $6, %xmm0, (%eax)
 ; X86-NEXT:    popl %esi
@@ -207,8 +207,8 @@ define void @freeze_extractelement_escape(ptr %origin0, ptr %origin1, ptr %dst,
 ;
 ; X64-LABEL: freeze_extractelement_escape:
 ; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa (%rdi), %xmm0
-; X64-NEXT:    vpand (%rsi), %xmm0, %xmm0
+; X64-NEXT:    vmovdqa (%rsi), %xmm0
+; X64-NEXT:    vpand (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rcx)
 ; X64-NEXT:    vpextrb $6, %xmm0, (%rdx)
 ; X64-NEXT:    retq
@@ -239,8 +239,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id
 ; X86-NEXT:    movl 32(%ebp), %edx
 ; X86-NEXT:    movl 12(%ebp), %esi
 ; X86-NEXT:    movl 8(%ebp), %edi
-; X86-NEXT:    vmovaps (%edi), %xmm0
-; X86-NEXT:    vandps (%esi), %xmm0, %xmm0
+; X86-NEXT:    vmovaps (%esi), %xmm0
+; X86-NEXT:    vandps (%edi), %xmm0, %xmm0
 ; X86-NEXT:    vmovaps %xmm0, (%esp)
 ; X86-NEXT:    movzbl (%esp,%ecx), %ecx
 ; X86-NEXT:    cmpb (%esp,%eax), %cl
@@ -255,8 +255,8 @@ define void @freeze_extractelement_extra_use(ptr %origin0, ptr %origin1, i64 %id
 ; X64:       # %bb.0:
 ; X64-NEXT:    andl $15, %ecx
 ; X64-NEXT:    andl $15, %edx
-; X64-NEXT:    vmovaps (%rdi), %xmm0
-; X64-NEXT:    vandps (%rsi), %xmm0, %xmm0
+; X64-NEXT:    vmovaps (%rsi), %xmm0
+; X64-NEXT:    vandps (%rdi), %xmm0, %xmm0
 ; X64-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movzbl -24(%rsp,%rdx), %eax
 ; X64-NEXT:    cmpb -24(%rsp,%rcx), %al
diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll
index cd16651123b07..47a32f9475987 100644
--- a/llvm/test/CodeGen/X86/gfni-shifts.ll
+++ b/llvm/test/CodeGen/X86/gfni-shifts.ll
@@ -1687,9 +1687,10 @@ define <64 x i8> @var_shl_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
 ; GFNIAVX512BW-NEXT:    vpsllw $5, %zmm1, %zmm1
 ; GFNIAVX512BW-NEXT:    vpmovb2m %zmm1, %k1
 ; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
+; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm2
 ; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
 ; GFNIAVX512BW-NEXT:    vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
+; GFNIAVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k1}
 ; GFNIAVX512BW-NEXT:    vpaddb %zmm1, %zmm1, %zmm1
 ; GFNIAVX512BW-NEXT:    vpmovb2m %zmm1, %k1
 ; GFNIAVX512BW-NEXT:    vpaddb %zmm0, %zmm0, %zmm0 {%k1}
diff --git a/llvm/test/CodeGen/X86/pr120093.ll b/llvm/test/CodeGen/X86/pr120093.ll
index 99bf2218c5538..bb4d13bf8532a 100644
--- a/llvm/test/CodeGen/X86/pr120093.ll
+++ b/llvm/test/CodeGen/X86/pr120093.ll
@@ -4,15 +4,11 @@
 define double @PR120093() {
 ; CHECK-LABEL: PR120093:
 ; CHECK:       # %bb.0: # %bb
-; CHECK-NEXT:    xorpd %xmm0, %xmm0
-; CHECK-NEXT:    xorpd %xmm1, %xmm1
-; CHECK-NEXT:    movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; CHECK-NEXT:    cmpnltpd %xmm1, %xmm0
-; CHECK-NEXT:    movmskpd %xmm0, %eax
-; CHECK-NEXT:    cmpl $3, %eax
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
 ; CHECK-NEXT:    jne .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %bb2
-; CHECK-NEXT:    xorpd %xmm0, %xmm0
+; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    retq
 ; CHECK-NEXT:  .LBB0_2: # %bb3
 bb:
diff --git a/llvm/test/CodeGen/X86/pr120906.ll b/llvm/test/CodeGen/X86/pr120906.ll
index f5f6331bf3bf6..eadd0351354e0 100644
--- a/llvm/test/CodeGen/X86/pr120906.ll
+++ b/llvm/test/CodeGen/X86/pr120906.ll
@@ -8,20 +8,29 @@ define i32 @PR120906(ptr %p) {
 ; CHECK-NEXT:    pxor %xmm0, %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    paddb %xmm1, %xmm1
-; CHECK-NEXT:    paddb %xmm1, %xmm1
 ; CHECK-NEXT:    pxor %xmm2, %xmm2
 ; CHECK-NEXT:    pcmpgtb %xmm1, %xmm2
-; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [11,11,11,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; CHECK-NEXT:    movdqa %xmm1, %xmm3
-; CHECK-NEXT:    paddb %xmm1, %xmm3
-; CHECK-NEXT:    pand %xmm2, %xmm3
-; CHECK-NEXT:    pandn %xmm1, %xmm2
-; CHECK-NEXT:    por %xmm1, %xmm2
-; CHECK-NEXT:    por %xmm3, %xmm2
-; CHECK-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; CHECK-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; CHECK-NEXT:    por %xmm2, %xmm0
+; CHECK-NEXT:    movdqa {{.*#+}} xmm3 = [11,11,11,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    movdqa %xmm3, %xmm4
+; CHECK-NEXT:    psllw $2, %xmm4
+; CHECK-NEXT:    movdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; CHECK-NEXT:    pand %xmm2, %xmm5
+; CHECK-NEXT:    pand %xmm4, %xmm5
+; CHECK-NEXT:    pandn %xmm3, %xmm2
+; CHECK-NEXT:    por %xmm5, %xmm2
+; CHECK-NEXT:    paddb %xmm1, %xmm1
+; CHECK-NEXT:    pxor %xmm4, %xmm4
+; CHECK-NEXT:    pcmpgtb %xmm1, %xmm4
+; CHECK-NEXT:    movdqa %xmm4, %xmm1
+; CHECK-NEXT:    pandn %xmm2, %xmm1
+; CHECK-NEXT:    paddb %xmm2, %xmm2
+; CHECK-NEXT:    pand %xmm4, %xmm2
+; CHECK-NEXT:    por %xmm3, %xmm1
+; CHECK-NEXT:    por %xmm2, %xmm1
+; CHECK-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; CHECK-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; CHECK-NEXT:    por %xmm1, %xmm0
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; CHECK-NEXT:    por %xmm0, %xmm1
 ; CHECK-NEXT:    movd %xmm1, %eax
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index 9728e130333c4..46366148789e8 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -26,26 +26,34 @@ define i64 @PR62286(i32 %a) {
 ; AVX1-LABEL: PR62286:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %edi, %xmm0
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpaddd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR62286:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovd %edi, %xmm0
-; AVX2-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; AVX2-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX2-NEXT:    vpaddd %ymm0, %ymm0, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
@@ -63,7 +71,9 @@ define i64 @PR62286(i32 %a) {
 ; AVX512-NEXT:    movw $4369, %ax # imm = 0x1111
 ; AVX512-NEXT:    kmovd %eax, %k1
 ; AVX512-NEXT:    vpaddd %zmm0, %zmm0, %zmm1 {%k1}
-; AVX512-NEXT:    vpmovsxdq %ymm1, %zmm0
+; AVX512-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    vpmovsxdq %ymm0, %zmm0
 ; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index 2ac2be5545dfd..d2b292f1a7996 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -119,8 +119,8 @@ define void @failing(ptr %0, ptr %1) nounwind {
 ; CHECK-AVX2-NEXT:  .LBB0_2: # %vector.body
 ; CHECK-AVX2-NEXT:    # Parent Loop BB0_1 Depth=1
 ; CHECK-AVX2-NEXT:    # => This Inner Loop Header: Depth=2
-; CHECK-AVX2-NEXT:    vmovdqu 1024(%rdx,%rsi), %xmm5
-; CHECK-AVX2-NEXT:    vmovdqu 1040(%rdx,%rsi), %xmm6
+; CHECK-AVX2-NEXT:    vmovdqu 1024(%rdx,%rsi), %ymm5
+; CHECK-AVX2-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; CHECK-AVX2-NEXT:    vpextrq $1, %xmm5, %rdi
 ; CHECK-AVX2-NEXT:    vpextrq $1, %xmm6, %r8
 ; CHECK-AVX2-NEXT:    vmovq %xmm5, %r9
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
index 74b51ac21dc1f..75e1d74cc048d 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
@@ -74,11 +74,11 @@ define <4 x float> @v4f32_no_daz(<4 x float> %f) #0 {
 ; BDW:       # %bb.0:
 ; BDW-NEXT:    vrsqrtps %xmm0, %xmm1
 ; BDW-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; BDW-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; BDW-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; BDW-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; BDW-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; BDW-NEXT:    vmulps %xmm3, %xmm1, %xmm1
+; BDW-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; BDW-NEXT:    vmulps %xmm3, %xmm2, %xmm3
+; BDW-NEXT:    vbroadcastss {{.*#+}} xmm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; BDW-NEXT:    vfmadd231ps {{.*#+}} xmm4 = (xmm2 * xmm1) + xmm4
+; BDW-NEXT:    vmulps %xmm4, %xmm3, %xmm1
 ; BDW-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; BDW-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; BDW-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
@@ -162,11 +162,11 @@ define <8 x float> @v8f32_no_daz(<8 x float> %f) #0 {
 ; BDW:       # %bb.0:
 ; BDW-NEXT:    vrsqrtps %ymm0, %ymm1
 ; BDW-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; BDW-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; BDW-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; BDW-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; BDW-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; BDW-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; BDW-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; BDW-NEXT:    vmulps %ymm3, %ymm2, %ymm3
+; BDW-NEXT:    vbroadcastss {{.*#+}} ymm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; BDW-NEXT:    vfmadd231ps {{.*#+}} ymm4 = (ymm2 * ymm1) + ymm4
+; BDW-NEXT:    vmulps %ymm4, %ymm3, %ymm1
 ; BDW-NEXT:    vbroadcastss {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
 ; BDW-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; BDW-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
@@ -278,11 +278,11 @@ define <4 x float> @v4f32_daz(<4 x float> %f) #1 {
 ; BDW:       # %bb.0:
 ; BDW-NEXT:    vrsqrtps %xmm0, %xmm1
 ; BDW-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; BDW-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; BDW-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; BDW-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; BDW-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; BDW-NEXT:    vmulps %xmm3, %xmm1, %xmm1
+; BDW-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; BDW-NEXT:    vmulps %xmm3, %xmm2, %xmm3
+; BDW-NEXT:    vbroadcastss {{.*#+}} xmm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; BDW-NEXT:    vfmadd231ps {{.*#+}} xmm4 = (xmm2 * xmm1) + xmm4
+; BDW-NEXT:    vmulps %xmm4, %xmm3, %xmm1
 ; BDW-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; BDW-NEXT:    vcmpneqps %xmm2, %xmm0, %xmm0
 ; BDW-NEXT:    vandps %xmm1, %xmm0, %xmm0
@@ -355,11 +355,11 @@ define <8 x float> @v8f32_daz(<8 x float> %f) #1 {
 ; BDW:       # %bb.0:
 ; BDW-NEXT:    vrsqrtps %ymm0, %ymm1
 ; BDW-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; BDW-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; BDW-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; BDW-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; BDW-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; BDW-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; BDW-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; BDW-NEXT:    vmulps %ymm3, %ymm2, %ymm3
+; BDW-NEXT:    vbroadcastss {{.*#+}} ymm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; BDW-NEXT:    vfmadd231ps {{.*#+}} ymm4 = (ymm2 * ymm1) + ymm4
+; BDW-NEXT:    vmulps %ymm4, %ymm3, %ymm1
 ; BDW-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; BDW-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm0
 ; BDW-NEXT:    vandps %ymm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index a260b325f7e3c..b50c54d0d4be2 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -219,11 +219,11 @@ define <4 x float> @sqrt_v4f32_check_denorms_ieee_ninf(<4 x float> %x) #7 {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vrsqrtps %xmm0, %xmm1
 ; AVX512-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; AVX512-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; AVX512-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; AVX512-NEXT:    vmulps %xmm3, %xmm1, %xmm1
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; AVX512-NEXT:    vmulps %xmm3, %xmm2, %xmm3
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; AVX512-NEXT:    vfmadd231ps {{.*#+}} xmm4 = (xmm2 * xmm1) + xmm4
+; AVX512-NEXT:    vmulps %xmm4, %xmm3, %xmm1
 ; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; AVX512-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
@@ -270,11 +270,11 @@ define <4 x float> @sqrt_v4f32_check_denorms_dynamic_ninf(<4 x float> %x) #8 {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vrsqrtps %xmm0, %xmm1
 ; AVX512-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; AVX512-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; AVX512-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; AVX512-NEXT:    vmulps %xmm3, %xmm1, %xmm1
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; AVX512-NEXT:    vmulps %xmm3, %xmm2, %xmm3
+; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm4 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
+; AVX512-NEXT:    vfmadd231ps {{.*#+}} xmm4 = (xmm2 * xmm1) + xmm4
+; AVX512-NEXT:    vmulps %xmm4, %xmm3, %xmm1
 ; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
 ; AVX512-NEXT:    vandps %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
index f91758b861b4c..2ef31c324531c 100644
--- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
@@ -494,93 +494,93 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; X64-LABEL: vec_v16i8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    psllw $5, %xmm1
+; X64-NEXT:    pxor %xmm2, %xmm2
 ; X64-NEXT:    pxor %xmm3, %xmm3
-; X64-NEXT:    pxor %xmm4, %xmm4
-; X64-NEXT:    pcmpgtb %xmm1, %xmm4
-; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllw $4, %xmm2
-; X64-NEXT:    pand %xmm4, %xmm2
-; X64-NEXT:    pandn %xmm0, %xmm4
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; X64-NEXT:    por %xmm4, %xmm2
-; X64-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; X64-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; X64-NEXT:    pcmpgtb %xmm1, %xmm3
+; X64-NEXT:    movdqa %xmm0, %xmm4
+; X64-NEXT:    psllw $4, %xmm4
+; X64-NEXT:    pand %xmm3, %xmm4
+; X64-NEXT:    pandn %xmm0, %xmm3
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; X64-NEXT:    por %xmm3, %xmm4
+; X64-NEXT:    punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
 ; X64-NEXT:    paddb %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm6, %xmm6
-; X64-NEXT:    pcmpgtb %xmm1, %xmm6
-; X64-NEXT:    movdqa %xmm6, %xmm7
-; X64-NEXT:    pandn %xmm2, %xmm7
-; X64-NEXT:    psllw $2, %xmm2
-; X64-NEXT:    pand %xmm6, %xmm2
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; X64-NEXT:    por %xmm7, %xmm2
+; X64-NEXT:    pxor %xmm3, %xmm3
+; X64-NEXT:    pcmpgtb %xmm1, %xmm3
+; X64-NEXT:    movdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X64-NEXT:    pand %xmm3, %xmm7
+; X64-NEXT:    pandn %xmm4, %xmm3
+; X64-NEXT:    psllw $2, %xmm4
+; X64-NEXT:    pand %xmm4, %xmm7
+; X64-NEXT:    por %xmm7, %xmm3
 ; X64-NEXT:    paddb %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm6, %xmm6
-; X64-NEXT:    pcmpgtb %xmm1, %xmm6
-; X64-NEXT:    movdqa %xmm6, %xmm1
-; X64-NEXT:    pandn %xmm2, %xmm1
-; X64-NEXT:    paddb %xmm2, %xmm2
-; X64-NEXT:    pand %xmm6, %xmm2
-; X64-NEXT:    por %xmm1, %xmm2
-; X64-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
+; X64-NEXT:    pxor %xmm4, %xmm4
+; X64-NEXT:    pcmpgtb %xmm1, %xmm4
+; X64-NEXT:    movdqa %xmm4, %xmm1
+; X64-NEXT:    pandn %xmm3, %xmm1
+; X64-NEXT:    paddb %xmm3, %xmm3
+; X64-NEXT:    pand %xmm4, %xmm3
+; X64-NEXT:    por %xmm1, %xmm3
+; X64-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pcmpgtw %xmm4, %xmm1
+; X64-NEXT:    pcmpgtw %xmm5, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm7
-; X64-NEXT:    pandn %xmm6, %xmm7
-; X64-NEXT:    psraw $4, %xmm6
-; X64-NEXT:    pand %xmm1, %xmm6
-; X64-NEXT:    por %xmm7, %xmm6
-; X64-NEXT:    paddw %xmm4, %xmm4
+; X64-NEXT:    pandn %xmm4, %xmm7
+; X64-NEXT:    psraw $4, %xmm4
+; X64-NEXT:    pand %xmm1, %xmm4
+; X64-NEXT:    por %xmm7, %xmm4
+; X64-NEXT:    paddw %xmm5, %xmm5
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pcmpgtw %xmm4, %xmm1
+; X64-NEXT:    pcmpgtw %xmm5, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm7
-; X64-NEXT:    pandn %xmm6, %xmm7
-; X64-NEXT:    psraw $2, %xmm6
-; X64-NEXT:    pand %xmm1, %xmm6
-; X64-NEXT:    por %xmm7, %xmm6
-; X64-NEXT:    paddw %xmm4, %xmm4
+; X64-NEXT:    pandn %xmm4, %xmm7
+; X64-NEXT:    psraw $2, %xmm4
+; X64-NEXT:    pand %xmm1, %xmm4
+; X64-NEXT:    por %xmm7, %xmm4
+; X64-NEXT:    paddw %xmm5, %xmm5
 ; X64-NEXT:    pxor %xmm1, %xmm1
-; X64-NEXT:    pcmpgtw %xmm4, %xmm1
-; X64-NEXT:    movdqa %xmm1, %xmm4
-; X64-NEXT:    pandn %xmm6, %xmm4
-; X64-NEXT:    psraw $1, %xmm6
-; X64-NEXT:    pand %xmm1, %xmm6
-; X64-NEXT:    por %xmm4, %xmm6
-; X64-NEXT:    psrlw $8, %xmm6
-; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X64-NEXT:    pxor %xmm4, %xmm4
-; X64-NEXT:    pcmpgtw %xmm5, %xmm4
-; X64-NEXT:    movdqa %xmm4, %xmm7
+; X64-NEXT:    pcmpgtw %xmm5, %xmm1
+; X64-NEXT:    movdqa %xmm1, %xmm5
+; X64-NEXT:    pandn %xmm4, %xmm5
+; X64-NEXT:    psraw $1, %xmm4
+; X64-NEXT:    pand %xmm1, %xmm4
+; X64-NEXT:    por %xmm5, %xmm4
+; X64-NEXT:    psrlw $8, %xmm4
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; X64-NEXT:    pxor %xmm5, %xmm5
+; X64-NEXT:    pcmpgtw %xmm6, %xmm5
+; X64-NEXT:    movdqa %xmm5, %xmm7
 ; X64-NEXT:    pandn %xmm1, %xmm7
 ; X64-NEXT:    psraw $4, %xmm1
-; X64-NEXT:    pand %xmm4, %xmm1
+; X64-NEXT:    pand %xmm5, %xmm1
 ; X64-NEXT:    por %xmm7, %xmm1
-; X64-NEXT:    paddw %xmm5, %xmm5
-; X64-NEXT:    pxor %xmm4, %xmm4
-; X64-NEXT:    pcmpgtw %xmm5, %xmm4
-; X64-NEXT:    movdqa %xmm4, %xmm7
+; X64-NEXT:    paddw %xmm6, %xmm6
+; X64-NEXT:    pxor %xmm5, %xmm5
+; X64-NEXT:    pcmpgtw %xmm6, %xmm5
+; X64-NEXT:    movdqa %xmm5, %xmm7
 ; X64-NEXT:    pandn %xmm1, %xmm7
 ; X64-NEXT:    psraw $2, %xmm1
-; X64-NEXT:    pand %xmm4, %xmm1
+; X64-NEXT:    pand %xmm5, %xmm1
 ; X64-NEXT:    por %xmm7, %xmm1
-; X64-NEXT:    paddw %xmm5, %xmm5
-; X64-NEXT:    pxor %xmm4, %xmm4
-; X64-NEXT:    pcmpgtw %xmm5, %xmm4
-; X64-NEXT:    movdqa %xmm4, %xmm5
-; X64-NEXT:    pandn %xmm1, %xmm5
+; X64-NEXT:    paddw %xmm6, %xmm6
+; X64-NEXT:    pxor %xmm5, %xmm5
+; X64-NEXT:    pcmpgtw %xmm6, %xmm5
+; X64-NEXT:    movdqa %xmm5, %xmm6
+; X64-NEXT:    pandn %xmm1, %xmm6
 ; X64-NEXT:    psraw $1, %xmm1
-; X64-NEXT:    pand %xmm4, %xmm1
-; X64-NEXT:    por %xmm5, %xmm1
+; X64-NEXT:    pand %xmm5, %xmm1
+; X64-NEXT:    por %xmm6, %xmm1
 ; X64-NEXT:    psrlw $8, %xmm1
-; X64-NEXT:    packuswb %xmm6, %xmm1
+; X64-NEXT:    packuswb %xmm4, %xmm1
 ; X64-NEXT:    pcmpeqb %xmm0, %xmm1
-; X64-NEXT:    pand %xmm1, %xmm2
-; X64-NEXT:    pcmpgtb %xmm0, %xmm3
-; X64-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; X64-NEXT:    pand %xmm1, %xmm3
+; X64-NEXT:    pcmpgtb %xmm0, %xmm2
+; X64-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
 ; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    por %xmm3, %xmm0
+; X64-NEXT:    por %xmm2, %xmm0
 ; X64-NEXT:    pandn %xmm0, %xmm1
-; X64-NEXT:    por %xmm2, %xmm1
+; X64-NEXT:    por %xmm3, %xmm1
 ; X64-NEXT:    movdqa %xmm1, %xmm0
 ; X64-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/ushl_sat_vec.ll b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
index b8e83da9cf361..e25c890dd22fd 100644
--- a/llvm/test/CodeGen/X86/ushl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
@@ -402,36 +402,37 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; X64-LABEL: vec_v16i8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    psllw $5, %xmm1
-; X64-NEXT:    pxor %xmm3, %xmm3
+; X64-NEXT:    pxor %xmm2, %xmm2
 ; X64-NEXT:    pxor %xmm4, %xmm4
 ; X64-NEXT:    pcmpgtb %xmm1, %xmm4
-; X64-NEXT:    movdqa %xmm4, %xmm5
-; X64-NEXT:    pandn %xmm0, %xmm5
-; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    psllw $4, %xmm2
-; X64-NEXT:    pand %xmm4, %xmm2
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; X64-NEXT:    por %xmm5, %xmm2
+; X64-NEXT:    movdqa %xmm4, %xmm3
+; X64-NEXT:    pandn %xmm0, %xmm3
+; X64-NEXT:    movdqa %xmm0, %xmm6
+; X64-NEXT:    psllw $4, %xmm6
+; X64-NEXT:    pand %xmm4, %xmm6
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; X64-NEXT:    por %xmm3, %xmm6
 ; X64-NEXT:    paddb %xmm1, %xmm1
 ; X64-NEXT:    pxor %xmm5, %xmm5
 ; X64-NEXT:    pcmpgtb %xmm1, %xmm5
-; X64-NEXT:    movdqa %xmm5, %xmm6
-; X64-NEXT:    pandn %xmm2, %xmm6
-; X64-NEXT:    psllw $2, %xmm2
-; X64-NEXT:    pand %xmm5, %xmm2
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; X64-NEXT:    por %xmm6, %xmm2
+; X64-NEXT:    movdqa %xmm5, %xmm3
+; X64-NEXT:    pandn %xmm6, %xmm3
+; X64-NEXT:    psllw $2, %xmm6
+; X64-NEXT:    movdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X64-NEXT:    pand %xmm5, %xmm7
+; X64-NEXT:    pand %xmm6, %xmm7
+; X64-NEXT:    por %xmm7, %xmm3
 ; X64-NEXT:    paddb %xmm1, %xmm1
-; X64-NEXT:    pcmpgtb %xmm1, %xmm3
-; X64-NEXT:    movdqa %xmm3, %xmm1
-; X64-NEXT:    pandn %xmm2, %xmm1
-; X64-NEXT:    paddb %xmm2, %xmm2
-; X64-NEXT:    pand %xmm3, %xmm2
-; X64-NEXT:    por %xmm1, %xmm2
+; X64-NEXT:    pcmpgtb %xmm1, %xmm2
 ; X64-NEXT:    movdqa %xmm2, %xmm1
+; X64-NEXT:    pandn %xmm3, %xmm1
+; X64-NEXT:    paddb %xmm3, %xmm3
+; X64-NEXT:    pand %xmm2, %xmm3
+; X64-NEXT:    por %xmm1, %xmm3
+; X64-NEXT:    movdqa %xmm3, %xmm1
 ; X64-NEXT:    psrlw $4, %xmm1
 ; X64-NEXT:    pand %xmm4, %xmm1
-; X64-NEXT:    pandn %xmm2, %xmm4
+; X64-NEXT:    pandn %xmm3, %xmm4
 ; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; X64-NEXT:    por %xmm4, %xmm1
 ; X64-NEXT:    movdqa %xmm5, %xmm4
@@ -440,16 +441,16 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; X64-NEXT:    pand %xmm5, %xmm1
 ; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; X64-NEXT:    por %xmm4, %xmm1
-; X64-NEXT:    movdqa %xmm3, %xmm4
+; X64-NEXT:    movdqa %xmm2, %xmm4
 ; X64-NEXT:    pandn %xmm1, %xmm4
 ; X64-NEXT:    psrlw $1, %xmm1
-; X64-NEXT:    pand %xmm3, %xmm1
+; X64-NEXT:    pand %xmm2, %xmm1
 ; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; X64-NEXT:    por %xmm4, %xmm1
 ; X64-NEXT:    pcmpeqb %xmm1, %xmm0
 ; X64-NEXT:    pcmpeqd %xmm1, %xmm1
 ; X64-NEXT:    pxor %xmm1, %xmm0
-; X64-NEXT:    por %xmm2, %xmm0
+; X64-NEXT:    por %xmm3, %xmm0
 ; X64-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: vec_v16i8:
diff --git a/llvm/test/CodeGen/X86/vector-compress.ll b/llvm/test/CodeGen/X86/vector-compress.ll
index 1ab1a1a01e168..821395fe464e8 100644
--- a/llvm/test/CodeGen/X86/vector-compress.ll
+++ b/llvm/test/CodeGen/X86/vector-compress.ll
@@ -244,66 +244,70 @@ define <8 x i32> @test_compress_v8i32(<8 x i32> %vec, <8 x i1> %mask, <8 x i32>
 ; AVX2-NEXT:    subq $64, %rsp
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
-; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm3
+; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
-; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm3, %xmm2
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vmovd %xmm2, %esi
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpinsrw $4, %esi, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrd $1, %xmm2, %edx
+; AVX2-NEXT:    vpinsrw $5, %edx, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrd $2, %xmm2, %ecx
+; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm3, %xmm3
+; AVX2-NEXT:    vpextrd $3, %xmm2, %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; AVX2-NEXT:    vpslld $31, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsrld $31, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $1, %xmm2, %eax
-; AVX2-NEXT:    vmovd %xmm2, %ecx
-; AVX2-NEXT:    addl %eax, %ecx
-; AVX2-NEXT:    vpextrd $2, %xmm2, %edx
-; AVX2-NEXT:    vpextrd $3, %xmm2, %eax
-; AVX2-NEXT:    addl %edx, %eax
-; AVX2-NEXT:    addl %ecx, %eax
-; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vpextrd $1, %xmm3, %ecx
-; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    vmovd %xmm3, %edx
-; AVX2-NEXT:    andl $1, %edx
-; AVX2-NEXT:    addq %rdx, %rcx
-; AVX2-NEXT:    vpextrd $2, %xmm3, %esi
-; AVX2-NEXT:    andl $1, %esi
-; AVX2-NEXT:    addq %rcx, %rsi
-; AVX2-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX2-NEXT:    andl $1, %edi
-; AVX2-NEXT:    addq %rsi, %rdi
-; AVX2-NEXT:    vmovd %xmm1, %r8d
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $1, %xmm2, %edi
+; AVX2-NEXT:    vmovd %xmm2, %r8d
+; AVX2-NEXT:    addl %edi, %r8d
+; AVX2-NEXT:    vpextrd $2, %xmm2, %r9d
+; AVX2-NEXT:    vpextrd $3, %xmm2, %edi
+; AVX2-NEXT:    addl %r9d, %edi
+; AVX2-NEXT:    addl %r8d, %edi
+; AVX2-NEXT:    andl $7, %edi
+; AVX2-NEXT:    vpextrd $1, %xmm1, %r8d
 ; AVX2-NEXT:    andl $1, %r8d
-; AVX2-NEXT:    addq %rdi, %r8
-; AVX2-NEXT:    vpextrd $1, %xmm1, %r9d
+; AVX2-NEXT:    vmovd %xmm1, %r9d
 ; AVX2-NEXT:    andl $1, %r9d
-; AVX2-NEXT:    addq %r8, %r9
+; AVX2-NEXT:    addq %r9, %r8
 ; AVX2-NEXT:    vpextrd $2, %xmm1, %r10d
 ; AVX2-NEXT:    andl $1, %r10d
-; AVX2-NEXT:    addq %r9, %r10
+; AVX2-NEXT:    addq %r8, %r10
 ; AVX2-NEXT:    vpextrd $3, %xmm1, %r11d
 ; AVX2-NEXT:    andl $1, %r11d
 ; AVX2-NEXT:    addq %r10, %r11
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    addq %r11, %rsi
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rsi, %rdx
+; AVX2-NEXT:    andl $1, %ecx
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vextractps $3, %xmm1, %ebx
-; AVX2-NEXT:    cmpq $8, %r11
-; AVX2-NEXT:    cmovbl (%rsp,%rax,4), %ebx
+; AVX2-NEXT:    cmpq $8, %rax
+; AVX2-NEXT:    cmovbl (%rsp,%rdi,4), %ebx
 ; AVX2-NEXT:    vmovss %xmm0, (%rsp)
-; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdx,4)
-; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
-; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rsi,4)
-; AVX2-NEXT:    andl $7, %edi
-; AVX2-NEXT:    vmovss %xmm1, (%rsp,%rdi,4)
-; AVX2-NEXT:    andl $7, %r8d
-; AVX2-NEXT:    vextractps $1, %xmm1, (%rsp,%r8,4)
-; AVX2-NEXT:    andl $7, %r9d
-; AVX2-NEXT:    vextractps $2, %xmm1, (%rsp,%r9,4)
-; AVX2-NEXT:    andl $7, %r10d
-; AVX2-NEXT:    vextractps $3, %xmm1, (%rsp,%r10,4)
-; AVX2-NEXT:    cmpq $7, %r11
-; AVX2-NEXT:    movl $7, %eax
-; AVX2-NEXT:    cmovbq %r11, %rax
-; AVX2-NEXT:    movl %eax, %eax
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%r9,4)
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%r8,4)
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%r10,4)
+; AVX2-NEXT:    andl $7, %r11d
+; AVX2-NEXT:    vmovss %xmm1, (%rsp,%r11,4)
+; AVX2-NEXT:    andl $7, %esi
+; AVX2-NEXT:    vextractps $1, %xmm1, (%rsp,%rsi,4)
+; AVX2-NEXT:    andl $7, %edx
+; AVX2-NEXT:    vextractps $2, %xmm1, (%rsp,%rdx,4)
+; AVX2-NEXT:    andl $7, %ecx
+; AVX2-NEXT:    vextractps $3, %xmm1, (%rsp,%rcx,4)
+; AVX2-NEXT:    cmpq $7, %rax
+; AVX2-NEXT:    movl $7, %ecx
+; AVX2-NEXT:    cmovbq %rax, %rcx
+; AVX2-NEXT:    movl %ecx, %eax
 ; AVX2-NEXT:    movl %ebx, (%rsp,%rax,4)
 ; AVX2-NEXT:    vmovaps (%rsp), %ymm0
 ; AVX2-NEXT:    leaq -8(%rbp), %rsp
@@ -345,66 +349,70 @@ define <8 x float> @test_compress_v8f32(<8 x float> %vec, <8 x i1> %mask, <8 x f
 ; AVX2-NEXT:    vpsrad $31, %ymm1, %ymm3
 ; AVX2-NEXT:    vmovaps %ymm2, (%rsp)
 ; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm3, %xmm2
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT:    vpslld $31, %ymm2, %ymm2
-; AVX2-NEXT:    vpsrld $31, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX2-NEXT:    vpextrd $1, %xmm2, %eax
-; AVX2-NEXT:    vmovd %xmm2, %ecx
-; AVX2-NEXT:    addl %eax, %ecx
-; AVX2-NEXT:    vpextrd $2, %xmm2, %eax
-; AVX2-NEXT:    vpextrd $3, %xmm2, %edx
-; AVX2-NEXT:    addl %eax, %edx
-; AVX2-NEXT:    addl %ecx, %edx
-; AVX2-NEXT:    andl $7, %edx
-; AVX2-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovd %xmm1, %esi
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpinsrw $4, %esi, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $1, %xmm1, %edx
+; AVX2-NEXT:    vpinsrw $5, %edx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT:    vpinsrw $6, %ecx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
+; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX2-NEXT:    vpslld $31, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $31, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpextrd $1, %xmm1, %edi
+; AVX2-NEXT:    vmovd %xmm1, %r8d
+; AVX2-NEXT:    addl %edi, %r8d
+; AVX2-NEXT:    vpextrd $2, %xmm1, %edi
+; AVX2-NEXT:    vpextrd $3, %xmm1, %r9d
+; AVX2-NEXT:    addl %edi, %r9d
+; AVX2-NEXT:    addl %r8d, %r9d
+; AVX2-NEXT:    andl $7, %r9d
+; AVX2-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX2-NEXT:    vmovss %xmm0, (%rsp)
-; AVX2-NEXT:    vmovd %xmm3, %eax
-; AVX2-NEXT:    andl $1, %eax
-; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rax,4)
-; AVX2-NEXT:    vpextrd $1, %xmm3, %ecx
-; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    addq %rax, %rcx
-; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
-; AVX2-NEXT:    vpextrd $2, %xmm3, %eax
-; AVX2-NEXT:    andl $1, %eax
-; AVX2-NEXT:    addq %rcx, %rax
-; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rax,4)
-; AVX2-NEXT:    vpextrd $3, %xmm3, %ecx
+; AVX2-NEXT:    vmovd %xmm3, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rdi,4)
+; AVX2-NEXT:    vpextrd $1, %xmm3, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rdi, %r8
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%r8,4)
+; AVX2-NEXT:    vpextrd $2, %xmm3, %edi
+; AVX2-NEXT:    andl $1, %edi
+; AVX2-NEXT:    addq %r8, %rdi
+; AVX2-NEXT:    vextractps $3, %xmm0, (%rsp,%rdi,4)
+; AVX2-NEXT:    vpextrd $3, %xmm3, %r8d
+; AVX2-NEXT:    andl $1, %r8d
+; AVX2-NEXT:    addq %rdi, %r8
+; AVX2-NEXT:    andl $1, %esi
+; AVX2-NEXT:    addq %r8, %rsi
+; AVX2-NEXT:    # kill: def $r8d killed $r8d killed $r8 def $r8
+; AVX2-NEXT:    andl $7, %r8d
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%r8,4)
+; AVX2-NEXT:    andl $1, %edx
+; AVX2-NEXT:    addq %rsi, %rdx
+; AVX2-NEXT:    # kill: def $esi killed $esi killed $rsi def $rsi
+; AVX2-NEXT:    andl $7, %esi
+; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rsi,4)
 ; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    addq %rax, %rcx
-; AVX2-NEXT:    vmovd %xmm1, %eax
+; AVX2-NEXT:    addq %rdx, %rcx
+; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
+; AVX2-NEXT:    andl $7, %edx
+; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rdx,4)
 ; AVX2-NEXT:    andl $1, %eax
 ; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
 ; AVX2-NEXT:    andl $7, %ecx
-; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
-; AVX2-NEXT:    vpextrd $1, %xmm1, %ecx
-; AVX2-NEXT:    andl $1, %ecx
-; AVX2-NEXT:    addq %rax, %rcx
-; AVX2-NEXT:    # kill: def $eax killed $eax killed $rax def $rax
-; AVX2-NEXT:    andl $7, %eax
-; AVX2-NEXT:    vextractps $1, %xmm0, (%rsp,%rax,4)
-; AVX2-NEXT:    vpextrd $2, %xmm1, %edx
-; AVX2-NEXT:    andl $1, %edx
-; AVX2-NEXT:    addq %rcx, %rdx
-; AVX2-NEXT:    # kill: def $ecx killed $ecx killed $rcx def $rcx
-; AVX2-NEXT:    andl $7, %ecx
-; AVX2-NEXT:    vextractps $2, %xmm0, (%rsp,%rcx,4)
-; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX2-NEXT:    andl $1, %eax
-; AVX2-NEXT:    addq %rdx, %rax
-; AVX2-NEXT:    # kill: def $edx killed $edx killed $rdx def $rdx
-; AVX2-NEXT:    andl $7, %edx
 ; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rdx,4)
+; AVX2-NEXT:    vmovss %xmm0, (%rsp,%rcx,4)
 ; AVX2-NEXT:    cmpq $8, %rax
 ; AVX2-NEXT:    jae .LBB5_2
 ; AVX2-NEXT:  # %bb.1:
-; AVX2-NEXT:    vmovaps %xmm2, %xmm0
+; AVX2-NEXT:    vmovaps %xmm1, %xmm0
 ; AVX2-NEXT:  .LBB5_2:
 ; AVX2-NEXT:    cmpq $7, %rax
 ; AVX2-NEXT:    movl $7, %ecx
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 6fbc10307e0bb..34a7086757d99 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -594,16 +594,16 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512F-NEXT:    vpblendvb %ymm4, %ymm5, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpand %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm2
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
@@ -628,16 +628,16 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512VL-NEXT:    vpblendvb %ymm4, %ymm5, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpand %ymm3, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index 421fa98709d48..3321a3d091b34 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -377,12 +377,12 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm3
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT:    vpandn %xmm3, %xmm6, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm7
-; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm7
+; AVX1-NEXT:    vpandn %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpor %xmm7, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlw $7, %xmm2, %xmm3
@@ -399,11 +399,11 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT:    vpandn %xmm3, %xmm6, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm4
-; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm4
+; AVX1-NEXT:    vpandn %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index bf525442a419b..e65b9a9d9a8db 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -775,20 +775,21 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm5
-; SSE2-NEXT:    pandn %xmm0, %xmm5
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm1, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm1
 ; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm1, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm5, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm5
+; SSE2-NEXT:    por %xmm5, %xmm1
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
-; SSE2-NEXT:    movdqa %xmm4, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
-; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm4, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
-; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    movdqa %xmm4, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm0
+; SSE2-NEXT:    paddb %xmm1, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm0, %xmm1
+; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_funnnel_v16i8:
@@ -818,24 +819,23 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ; SSE41-NEXT:    pblendvb %xmm0, %xmm6, %xmm1
 ; SSE41-NEXT:    pandn %xmm5, %xmm3
 ; SSE41-NEXT:    psllw $5, %xmm3
-; SSE41-NEXT:    movdqa %xmm3, %xmm4
-; SSE41-NEXT:    paddb %xmm3, %xmm4
 ; SSE41-NEXT:    paddb %xmm2, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm5
-; SSE41-NEXT:    psllw $4, %xmm5
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    psllw $4, %xmm4
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
 ; SSE41-NEXT:    movdqa %xmm3, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm5, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm3
-; SSE41-NEXT:    psllw $2, %xmm3
-; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT:    movdqa %xmm2, %xmm3
-; SSE41-NEXT:    paddb %xmm2, %xmm3
-; SSE41-NEXT:    paddb %xmm4, %xmm4
-; SSE41-NEXT:    movdqa %xmm4, %xmm0
-; SSE41-NEXT:    pblendvb %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    psllw $2, %xmm4
+; SSE41-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE41-NEXT:    paddb %xmm3, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    paddb %xmm2, %xmm4
+; SSE41-NEXT:    paddb %xmm3, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    pblendvb %xmm0, %xmm4, %xmm2
 ; SSE41-NEXT:    por %xmm1, %xmm2
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
@@ -858,17 +858,17 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpandn %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllw $5, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
-; AVX1-NEXT:    vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm2
-; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX1-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
@@ -890,17 +890,17 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ; AVX2-NEXT:    vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
 ; AVX2-NEXT:    vpandn %xmm3, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsllw $5, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
 ; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpsllw $4, %xmm0, %xmm4
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm4
-; AVX2-NEXT:    vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpsllw $2, %xmm0, %xmm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
-; AVX2-NEXT:    vpaddb %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $4, %xmm0, %xmm3
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb %xmm0, %xmm0, %xmm3
+; AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
@@ -1065,20 +1065,21 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
 ; X86-SSE2-NEXT:    paddb %xmm2, %xmm2
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT:    pcmpgtb %xmm2, %xmm1
-; X86-SSE2-NEXT:    movdqa %xmm1, %xmm5
-; X86-SSE2-NEXT:    pandn %xmm0, %xmm5
+; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-SSE2-NEXT:    pand %xmm1, %xmm5
+; X86-SSE2-NEXT:    pandn %xmm0, %xmm1
 ; X86-SSE2-NEXT:    psllw $2, %xmm0
-; X86-SSE2-NEXT:    pand %xmm1, %xmm0
-; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE2-NEXT:    por %xmm5, %xmm0
+; X86-SSE2-NEXT:    pand %xmm0, %xmm5
+; X86-SSE2-NEXT:    por %xmm5, %xmm1
 ; X86-SSE2-NEXT:    paddb %xmm2, %xmm2
 ; X86-SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
-; X86-SSE2-NEXT:    movdqa %xmm4, %xmm1
-; X86-SSE2-NEXT:    pandn %xmm0, %xmm1
-; X86-SSE2-NEXT:    paddb %xmm0, %xmm0
-; X86-SSE2-NEXT:    pand %xmm4, %xmm0
-; X86-SSE2-NEXT:    por %xmm1, %xmm0
-; X86-SSE2-NEXT:    por %xmm3, %xmm0
+; X86-SSE2-NEXT:    movdqa %xmm4, %xmm0
+; X86-SSE2-NEXT:    pandn %xmm1, %xmm0
+; X86-SSE2-NEXT:    paddb %xmm1, %xmm1
+; X86-SSE2-NEXT:    pand %xmm4, %xmm1
+; X86-SSE2-NEXT:    por %xmm0, %xmm1
+; X86-SSE2-NEXT:    por %xmm3, %xmm1
+; X86-SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; X86-SSE2-NEXT:    retl
   %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
   ret <16 x i8> %res
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index b0a1a91bdccc5..81c7096e9e628 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -594,17 +594,17 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX2-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
 ; AVX2-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX2-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
 ; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX2-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm2
-; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX2-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
@@ -626,17 +626,17 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512F-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
 ; AVX512F-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
 ; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm2
-; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512F-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT:    retq
 ;
@@ -658,17 +658,17 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
 ; AVX512VL-NEXT:    vpblendvb %ymm5, %ymm4, %ymm1, %ymm1
 ; AVX512VL-NEXT:    vpandn %ymm3, %ymm2, %ymm2
 ; AVX512VL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm3
 ; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm4, %ymm4
-; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm2
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; AVX512VL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index e2a3e261c0411..6e8f666268b49 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -400,11 +400,11 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ; AVX1-NEXT:    vpsubb %xmm5, %xmm6, %xmm5
 ; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm3
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT:    vpandn %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm8
-; AVX1-NEXT:    vpand %xmm7, %xmm8, %xmm8
+; AVX1-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm8
+; AVX1-NEXT:    vpandn %xmm8, %xmm7, %xmm8
 ; AVX1-NEXT:    vpor %xmm3, %xmm8, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
@@ -423,11 +423,11 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
 ; AVX1-NEXT:    vpsubb %xmm1, %xmm6, %xmm1
 ; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT:    vpandn %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm4
-; AVX1-NEXT:    vpand %xmm7, %xmm4, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm4
+; AVX1-NEXT:    vpandn %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 64c31187f29ef..f79fb7ea12ce4 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -321,12 +321,12 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vpsllw $5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm3
 ; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT:    vpandn %xmm3, %xmm6, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm2, %xmm7
-; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm7
-; AVX1-NEXT:    vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm2, %xmm7
+; AVX1-NEXT:    vpandn %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpor %xmm7, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm5, %xmm5, %xmm5
 ; AVX1-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsrlw $7, %xmm2, %xmm3
@@ -343,11 +343,11 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
 ; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpsllw $5, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT:    vpandn %xmm3, %xmm6, %xmm3
-; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm4
-; AVX1-NEXT:    vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $6, %xmm0, %xmm4
+; AVX1-NEXT:    vpandn %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpor %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-shift-lut.ll b/llvm/test/CodeGen/X86/vector-shift-lut.ll
index 0bf2006090893..57ad8a4e101be 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lut.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lut.ll
@@ -13,32 +13,32 @@
 define <16 x i8> @uniform_shl_v16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: uniform_shl_v16i8:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm1
-; SSE2-NEXT:    psllw $5, %xmm1
+; SSE2-NEXT:    psllw $5, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    paddb %xmm3, %xmm0
-; SSE2-NEXT:    pcmpeqd %xmm3, %xmm3
-; SSE2-NEXT:    psubb %xmm3, %xmm0
-; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
-; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
-; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; SSE2-NEXT:    pand %xmm1, %xmm3
+; SSE2-NEXT:    paddb %xmm1, %xmm3
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    psubb %xmm1, %xmm3
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pxor %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm1, %xmm4
+; SSE2-NEXT:    pandn %xmm3, %xmm1
+; SSE2-NEXT:    psllw $2, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm1
+; SSE2-NEXT:    paddb %xmm0, %xmm0
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm0
+; SSE2-NEXT:    paddb %xmm1, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    por %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: uniform_shl_v16i8:
@@ -196,24 +196,25 @@ define <32 x i8> @uniform_shl_v32i8(<32 x i8> %a) nounwind {
 ; SSE2-NEXT:    movdqa %xmm0, %xmm2
 ; SSE2-NEXT:    psllw $5, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pxor %xmm7, %xmm7
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm7
+; SSE2-NEXT:    pxor %xmm6, %xmm6
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm6
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
-; SSE2-NEXT:    movdqa %xmm7, %xmm0
+; SSE2-NEXT:    movdqa %xmm6, %xmm0
 ; SSE2-NEXT:    pandn %xmm4, %xmm0
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE2-NEXT:    pand %xmm5, %xmm7
-; SSE2-NEXT:    por %xmm0, %xmm7
+; SSE2-NEXT:    pand %xmm5, %xmm6
+; SSE2-NEXT:    por %xmm0, %xmm6
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm8
-; SSE2-NEXT:    pandn %xmm7, %xmm8
-; SSE2-NEXT:    psllw $2, %xmm7
+; SSE2-NEXT:    movdqa %xmm0, %xmm7
+; SSE2-NEXT:    pandn %xmm6, %xmm0
+; SSE2-NEXT:    movdqa %xmm6, %xmm8
+; SSE2-NEXT:    psllw $2, %xmm8
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT:    pand %xmm6, %xmm0
-; SSE2-NEXT:    pand %xmm7, %xmm0
-; SSE2-NEXT:    por %xmm8, %xmm0
+; SSE2-NEXT:    pand %xmm6, %xmm7
+; SSE2-NEXT:    pand %xmm8, %xmm7
+; SSE2-NEXT:    por %xmm7, %xmm0
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm7, %xmm7
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm7
@@ -232,12 +233,11 @@ define <32 x i8> @uniform_shl_v32i8(<32 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm4
-; SSE2-NEXT:    pandn %xmm7, %xmm4
+; SSE2-NEXT:    pand %xmm2, %xmm6
+; SSE2-NEXT:    pandn %xmm7, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm7
-; SSE2-NEXT:    pand %xmm6, %xmm2
-; SSE2-NEXT:    pand %xmm7, %xmm2
-; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    pand %xmm7, %xmm6
+; SSE2-NEXT:    por %xmm6, %xmm2
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
@@ -534,24 +534,25 @@ define <64 x i8> @uniform_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psllw $5, %xmm1
 ; SSE2-NEXT:    pxor %xmm6, %xmm6
-; SSE2-NEXT:    pxor %xmm9, %xmm9
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm9
+; SSE2-NEXT:    pxor %xmm8, %xmm8
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm8
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE2-NEXT:    movdqa %xmm9, %xmm0
+; SSE2-NEXT:    movdqa %xmm8, %xmm0
 ; SSE2-NEXT:    pandn %xmm5, %xmm0
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm7 = [112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112]
-; SSE2-NEXT:    pand %xmm7, %xmm9
-; SSE2-NEXT:    por %xmm0, %xmm9
+; SSE2-NEXT:    pand %xmm7, %xmm8
+; SSE2-NEXT:    por %xmm0, %xmm8
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm10
-; SSE2-NEXT:    pandn %xmm9, %xmm10
-; SSE2-NEXT:    psllw $2, %xmm9
+; SSE2-NEXT:    movdqa %xmm0, %xmm9
+; SSE2-NEXT:    pandn %xmm8, %xmm0
+; SSE2-NEXT:    movdqa %xmm8, %xmm10
+; SSE2-NEXT:    psllw $2, %xmm10
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT:    pand %xmm8, %xmm0
-; SSE2-NEXT:    pand %xmm9, %xmm0
-; SSE2-NEXT:    por %xmm10, %xmm0
+; SSE2-NEXT:    pand %xmm8, %xmm9
+; SSE2-NEXT:    pand %xmm10, %xmm9
+; SSE2-NEXT:    por %xmm9, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm9, %xmm9
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm9
@@ -571,10 +572,10 @@ define <64 x i8> @uniform_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtb %xmm4, %xmm1
 ; SSE2-NEXT:    movdqa %xmm1, %xmm10
-; SSE2-NEXT:    pandn %xmm9, %xmm10
+; SSE2-NEXT:    pandn %xmm9, %xmm1
 ; SSE2-NEXT:    psllw $2, %xmm9
-; SSE2-NEXT:    pand %xmm8, %xmm1
-; SSE2-NEXT:    pand %xmm9, %xmm1
+; SSE2-NEXT:    pand %xmm8, %xmm10
+; SSE2-NEXT:    pand %xmm9, %xmm10
 ; SSE2-NEXT:    por %xmm10, %xmm1
 ; SSE2-NEXT:    paddb %xmm4, %xmm4
 ; SSE2-NEXT:    pxor %xmm9, %xmm9
@@ -595,10 +596,10 @@ define <64 x i8> @uniform_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
 ; SSE2-NEXT:    movdqa %xmm4, %xmm10
-; SSE2-NEXT:    pandn %xmm9, %xmm10
+; SSE2-NEXT:    pandn %xmm9, %xmm4
 ; SSE2-NEXT:    psllw $2, %xmm9
-; SSE2-NEXT:    pand %xmm8, %xmm4
-; SSE2-NEXT:    pand %xmm9, %xmm4
+; SSE2-NEXT:    pand %xmm8, %xmm10
+; SSE2-NEXT:    pand %xmm9, %xmm10
 ; SSE2-NEXT:    por %xmm10, %xmm4
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm9, %xmm9
@@ -618,12 +619,11 @@ define <64 x i8> @uniform_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, %xmm7
-; SSE2-NEXT:    pandn %xmm2, %xmm7
+; SSE2-NEXT:    pand %xmm5, %xmm8
+; SSE2-NEXT:    pandn %xmm2, %xmm5
 ; SSE2-NEXT:    psllw $2, %xmm2
-; SSE2-NEXT:    pand %xmm8, %xmm5
-; SSE2-NEXT:    pand %xmm2, %xmm5
-; SSE2-NEXT:    por %xmm7, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm8
+; SSE2-NEXT:    por %xmm8, %xmm5
 ; SSE2-NEXT:    paddb %xmm3, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm6
 ; SSE2-NEXT:    movdqa %xmm6, %xmm2
@@ -1033,22 +1033,23 @@ define <32 x i8> @perlane_shl_v32i8(<32 x i8> %a) nounwind {
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    psllw $5, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pxor %xmm5, %xmm5
-; SSE2-NEXT:    pcmpgtb %xmm2, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm1
 ; SSE2-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; SSE2-NEXT:    por %xmm1, %xmm5
+; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE2-NEXT:    por %xmm1, %xmm4
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm6
-; SSE2-NEXT:    pandn %xmm5, %xmm6
-; SSE2-NEXT:    psllw $2, %xmm5
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pandn %xmm4, %xmm1
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    psllw $2, %xmm6
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT:    pand %xmm4, %xmm1
-; SSE2-NEXT:    pand %xmm5, %xmm1
-; SSE2-NEXT:    por %xmm6, %xmm1
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pand %xmm6, %xmm5
+; SSE2-NEXT:    por %xmm5, %xmm1
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm5, %xmm5
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm5
@@ -1068,12 +1069,11 @@ define <32 x i8> @perlane_shl_v32i8(<32 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
 ; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm6
-; SSE2-NEXT:    pandn %xmm5, %xmm6
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm5, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm5
-; SSE2-NEXT:    pand %xmm4, %xmm2
-; SSE2-NEXT:    pand %xmm5, %xmm2
-; SSE2-NEXT:    por %xmm6, %xmm2
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm2
 ; SSE2-NEXT:    paddb %xmm0, %xmm0
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm0
@@ -1371,11 +1371,11 @@ define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, %xmm7
-; SSE2-NEXT:    pandn %xmm4, %xmm7
+; SSE2-NEXT:    pandn %xmm4, %xmm0
 ; SSE2-NEXT:    psllw $2, %xmm4
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; SSE2-NEXT:    pand %xmm6, %xmm0
-; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    pand %xmm6, %xmm7
+; SSE2-NEXT:    pand %xmm4, %xmm7
 ; SSE2-NEXT:    por %xmm7, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
@@ -1396,10 +1396,10 @@ define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    pxor %xmm4, %xmm4
 ; SSE2-NEXT:    pcmpgtb %xmm3, %xmm4
 ; SSE2-NEXT:    movdqa %xmm4, %xmm7
-; SSE2-NEXT:    pandn %xmm1, %xmm7
+; SSE2-NEXT:    pandn %xmm1, %xmm4
 ; SSE2-NEXT:    psllw $2, %xmm1
-; SSE2-NEXT:    pand %xmm6, %xmm4
-; SSE2-NEXT:    pand %xmm1, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm7
+; SSE2-NEXT:    pand %xmm1, %xmm7
 ; SSE2-NEXT:    por %xmm7, %xmm4
 ; SSE2-NEXT:    paddb %xmm3, %xmm3
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
@@ -1420,12 +1420,11 @@ define <64 x i8> @perlane_shl_v64i8(<64 x i8> %a) nounwind {
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm1
-; SSE2-NEXT:    pandn %xmm7, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm6
+; SSE2-NEXT:    pandn %xmm7, %xmm3
 ; SSE2-NEXT:    psllw $2, %xmm7
-; SSE2-NEXT:    pand %xmm6, %xmm3
-; SSE2-NEXT:    pand %xmm7, %xmm3
-; SSE2-NEXT:    por %xmm1, %xmm3
+; SSE2-NEXT:    pand %xmm7, %xmm6
+; SSE2-NEXT:    por %xmm6, %xmm3
 ; SSE2-NEXT:    paddb %xmm2, %xmm2
 ; SSE2-NEXT:    pcmpgtb %xmm2, %xmm5
 ; SSE2-NEXT:    movdqa %xmm5, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 2b1cf5b671e53..5fe2a052c4d3f 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -281,31 +281,32 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; SSE2-LABEL: var_shift_v16i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psllw $5, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pandn %xmm0, %xmm4
 ; SSE2-NEXT:    psllw $4, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT:    por %xmm4, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm2
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
-; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shift_v16i8:
@@ -391,31 +392,32 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
 ; X86-SSE-LABEL: var_shift_v16i8:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    psllw $5, %xmm1
-; X86-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm4
 ; X86-SSE-NEXT:    pandn %xmm0, %xmm4
 ; X86-SSE-NEXT:    psllw $4, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
+; X86-SSE-NEXT:    pand %xmm2, %xmm0
 ; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT:    por %xmm4, %xmm0
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X86-SSE-NEXT:    pandn %xmm0, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-SSE-NEXT:    pand %xmm2, %xmm4
+; X86-SSE-NEXT:    pandn %xmm0, %xmm2
 ; X86-SSE-NEXT:    psllw $2, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
-; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    por %xmm4, %xmm0
+; X86-SSE-NEXT:    pand %xmm0, %xmm4
+; X86-SSE-NEXT:    por %xmm4, %xmm2
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
-; X86-SSE-NEXT:    movdqa %xmm2, %xmm1
-; X86-SSE-NEXT:    pandn %xmm0, %xmm1
-; X86-SSE-NEXT:    paddb %xmm0, %xmm0
-; X86-SSE-NEXT:    pand %xmm2, %xmm0
-; X86-SSE-NEXT:    por %xmm1, %xmm0
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm0
+; X86-SSE-NEXT:    pandn %xmm2, %xmm0
+; X86-SSE-NEXT:    paddb %xmm2, %xmm2
+; X86-SSE-NEXT:    pand %xmm3, %xmm2
+; X86-SSE-NEXT:    por %xmm0, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm0
 ; X86-SSE-NEXT:    retl
   %shift = shl <16 x i8> %a, %b
   ret <16 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
index d245bdca6ee29..9e3f1246ff69f 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
@@ -340,31 +340,32 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
 ; SSE2-LABEL: var_shift_v8i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psllw $5, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pandn %xmm0, %xmm4
 ; SSE2-NEXT:    psllw $4, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT:    por %xmm4, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm2
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
-; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shift_v8i8:
@@ -450,31 +451,32 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
 ; X86-SSE-LABEL: var_shift_v8i8:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    psllw $5, %xmm1
-; X86-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm4
 ; X86-SSE-NEXT:    pandn %xmm0, %xmm4
 ; X86-SSE-NEXT:    psllw $4, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
+; X86-SSE-NEXT:    pand %xmm2, %xmm0
 ; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT:    por %xmm4, %xmm0
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X86-SSE-NEXT:    pandn %xmm0, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-SSE-NEXT:    pand %xmm2, %xmm4
+; X86-SSE-NEXT:    pandn %xmm0, %xmm2
 ; X86-SSE-NEXT:    psllw $2, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
-; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    por %xmm4, %xmm0
+; X86-SSE-NEXT:    pand %xmm0, %xmm4
+; X86-SSE-NEXT:    por %xmm4, %xmm2
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
-; X86-SSE-NEXT:    movdqa %xmm2, %xmm1
-; X86-SSE-NEXT:    pandn %xmm0, %xmm1
-; X86-SSE-NEXT:    paddb %xmm0, %xmm0
-; X86-SSE-NEXT:    pand %xmm2, %xmm0
-; X86-SSE-NEXT:    por %xmm1, %xmm0
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm0
+; X86-SSE-NEXT:    pandn %xmm2, %xmm0
+; X86-SSE-NEXT:    paddb %xmm2, %xmm2
+; X86-SSE-NEXT:    pand %xmm3, %xmm2
+; X86-SSE-NEXT:    por %xmm0, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm0
 ; X86-SSE-NEXT:    retl
   %shift = shl <8 x i8> %a, %b
   ret <8 x i8> %shift
@@ -484,31 +486,32 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
 ; SSE2-LABEL: var_shift_v4i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psllw $5, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pandn %xmm0, %xmm4
 ; SSE2-NEXT:    psllw $4, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT:    por %xmm4, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm2
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
-; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shift_v4i8:
@@ -594,31 +597,32 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
 ; X86-SSE-LABEL: var_shift_v4i8:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    psllw $5, %xmm1
-; X86-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm4
 ; X86-SSE-NEXT:    pandn %xmm0, %xmm4
 ; X86-SSE-NEXT:    psllw $4, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
+; X86-SSE-NEXT:    pand %xmm2, %xmm0
 ; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT:    por %xmm4, %xmm0
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X86-SSE-NEXT:    pandn %xmm0, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-SSE-NEXT:    pand %xmm2, %xmm4
+; X86-SSE-NEXT:    pandn %xmm0, %xmm2
 ; X86-SSE-NEXT:    psllw $2, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
-; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    por %xmm4, %xmm0
+; X86-SSE-NEXT:    pand %xmm0, %xmm4
+; X86-SSE-NEXT:    por %xmm4, %xmm2
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
-; X86-SSE-NEXT:    movdqa %xmm2, %xmm1
-; X86-SSE-NEXT:    pandn %xmm0, %xmm1
-; X86-SSE-NEXT:    paddb %xmm0, %xmm0
-; X86-SSE-NEXT:    pand %xmm2, %xmm0
-; X86-SSE-NEXT:    por %xmm1, %xmm0
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm0
+; X86-SSE-NEXT:    pandn %xmm2, %xmm0
+; X86-SSE-NEXT:    paddb %xmm2, %xmm2
+; X86-SSE-NEXT:    pand %xmm3, %xmm2
+; X86-SSE-NEXT:    por %xmm0, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm0
 ; X86-SSE-NEXT:    retl
   %shift = shl <4 x i8> %a, %b
   ret <4 x i8> %shift
@@ -628,31 +632,32 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
 ; SSE2-LABEL: var_shift_v2i8:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    psllw $5, %xmm1
-; SSE2-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pandn %xmm0, %xmm4
 ; SSE2-NEXT:    psllw $4, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT:    por %xmm4, %xmm0
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pxor %xmm3, %xmm3
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
 ; SSE2-NEXT:    psllw $2, %xmm0
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pand %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm2
 ; SSE2-NEXT:    paddb %xmm1, %xmm1
-; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm0, %xmm1
-; SSE2-NEXT:    paddb %xmm0, %xmm0
-; SSE2-NEXT:    pand %xmm2, %xmm0
-; SSE2-NEXT:    por %xmm1, %xmm0
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: var_shift_v2i8:
@@ -738,31 +743,32 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
 ; X86-SSE-LABEL: var_shift_v2i8:
 ; X86-SSE:       # %bb.0:
 ; X86-SSE-NEXT:    psllw $5, %xmm1
-; X86-SSE-NEXT:    pxor %xmm2, %xmm2
 ; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm4
 ; X86-SSE-NEXT:    pandn %xmm0, %xmm4
 ; X86-SSE-NEXT:    psllw $4, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
+; X86-SSE-NEXT:    pand %xmm2, %xmm0
 ; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT:    por %xmm4, %xmm0
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X86-SSE-NEXT:    pandn %xmm0, %xmm4
+; X86-SSE-NEXT:    pxor %xmm2, %xmm2
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
+; X86-SSE-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-SSE-NEXT:    pand %xmm2, %xmm4
+; X86-SSE-NEXT:    pandn %xmm0, %xmm2
 ; X86-SSE-NEXT:    psllw $2, %xmm0
-; X86-SSE-NEXT:    pand %xmm3, %xmm0
-; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE-NEXT:    por %xmm4, %xmm0
+; X86-SSE-NEXT:    pand %xmm0, %xmm4
+; X86-SSE-NEXT:    por %xmm4, %xmm2
 ; X86-SSE-NEXT:    paddb %xmm1, %xmm1
-; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm2
-; X86-SSE-NEXT:    movdqa %xmm2, %xmm1
-; X86-SSE-NEXT:    pandn %xmm0, %xmm1
-; X86-SSE-NEXT:    paddb %xmm0, %xmm0
-; X86-SSE-NEXT:    pand %xmm2, %xmm0
-; X86-SSE-NEXT:    por %xmm1, %xmm0
+; X86-SSE-NEXT:    pcmpgtb %xmm1, %xmm3
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm0
+; X86-SSE-NEXT:    pandn %xmm2, %xmm0
+; X86-SSE-NEXT:    paddb %xmm2, %xmm2
+; X86-SSE-NEXT:    pand %xmm3, %xmm2
+; X86-SSE-NEXT:    por %xmm0, %xmm2
+; X86-SSE-NEXT:    movdqa %xmm2, %xmm0
 ; X86-SSE-NEXT:    retl
   %shift = shl <2 x i8> %a, %b
   ret <2 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index da8a3f3fa0d4e..8ead853500e15 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -671,29 +671,29 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(ptr %p0) "min-legal-vector-width"="25
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX1-NEXT:    vpmovsxbd {{.*#+}} xmm4 = [4294967295,0,4294967295,0]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovsxbd {{.*#+}} xmm0 = [4294967295,0,4294967295,0]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
 ; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -1628,29 +1628,29 @@ define <8 x i16> @trunc_packus_v8i64_v8i16(ptr %p0) "min-legal-vector-width"="25
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = [65535,65535]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = [65535,65535]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
 ; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
@@ -3030,29 +3030,29 @@ define <8 x i8> @trunc_packus_v8i64_v8i8(ptr %p0) "min-legal-vector-width"="256"
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = [255,255]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
 ; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -3317,29 +3317,29 @@ define void @trunc_packus_v8i64_v8i8_store(ptr %p0, ptr%p1) "min-legal-vector-wi
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = [255,255]
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX1-NEXT:    vmovdqa 48(%rdi), %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm0, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
 ; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -3797,8 +3797,8 @@ define <16 x i8> @trunc_packus_v16i64_v16i8(ptr %p0) "min-legal-vector-width"="2
 ;
 ; AVX1-LABEL: trunc_packus_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX1-NEXT:    vpmovzxbq {{.*#+}} xmm2 = [255,255]
+; AVX1-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm1
 ; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vmovdqa 112(%rdi), %xmm1
diff --git a/llvm/test/CodeGen/X86/vshift-6.ll b/llvm/test/CodeGen/X86/vshift-6.ll
index 912ff750d9e91..60d2055a0f0e7 100644
--- a/llvm/test/CodeGen/X86/vshift-6.ll
+++ b/llvm/test/CodeGen/X86/vshift-6.ll
@@ -32,23 +32,23 @@ define <16 x i8> @do_not_crash(ptr, ptr, ptr, i32, i64, i8) {
 ; X86-NEXT:    movb %al, (%ecx)
 ; X86-NEXT:    movd %eax, %xmm1
 ; X86-NEXT:    psllq $56, %xmm1
-; X86-NEXT:    pcmpeqd %xmm3, %xmm3
+; X86-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X86-NEXT:    psllw $5, %xmm1
 ; X86-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-NEXT:    pxor %xmm2, %xmm2
-; X86-NEXT:    pxor %xmm0, %xmm0
-; X86-NEXT:    pcmpgtb %xmm1, %xmm0
-; X86-NEXT:    pxor %xmm0, %xmm3
-; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    por %xmm3, %xmm0
-; X86-NEXT:    paddb %xmm1, %xmm1
 ; X86-NEXT:    pxor %xmm3, %xmm3
 ; X86-NEXT:    pcmpgtb %xmm1, %xmm3
-; X86-NEXT:    movdqa %xmm3, %xmm4
-; X86-NEXT:    pandn %xmm0, %xmm4
-; X86-NEXT:    psllw $2, %xmm0
-; X86-NEXT:    pand %xmm3, %xmm0
-; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pxor %xmm3, %xmm0
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3
+; X86-NEXT:    por %xmm0, %xmm3
+; X86-NEXT:    paddb %xmm1, %xmm1
+; X86-NEXT:    pxor %xmm0, %xmm0
+; X86-NEXT:    pcmpgtb %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X86-NEXT:    pand %xmm0, %xmm4
+; X86-NEXT:    pandn %xmm3, %xmm0
+; X86-NEXT:    psllw $2, %xmm3
+; X86-NEXT:    pand %xmm3, %xmm4
 ; X86-NEXT:    por %xmm4, %xmm0
 ; X86-NEXT:    paddb %xmm1, %xmm1
 ; X86-NEXT:    pcmpgtb %xmm1, %xmm2
@@ -64,30 +64,30 @@ define <16 x i8> @do_not_crash(ptr, ptr, ptr, i32, i64, i8) {
 ; X64-NEXT:    movb %r9b, (%rdi)
 ; X64-NEXT:    movd %r9d, %xmm1
 ; X64-NEXT:    psllq $56, %xmm1
-; X64-NEXT:    pcmpeqd %xmm2, %xmm2
+; X64-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-NEXT:    psllw $5, %xmm1
 ; X64-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT:    pxor %xmm2, %xmm2
 ; X64-NEXT:    pxor %xmm3, %xmm3
+; X64-NEXT:    pcmpgtb %xmm1, %xmm3
+; X64-NEXT:    pxor %xmm3, %xmm0
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; X64-NEXT:    por %xmm0, %xmm3
+; X64-NEXT:    paddb %xmm1, %xmm1
 ; X64-NEXT:    pxor %xmm0, %xmm0
 ; X64-NEXT:    pcmpgtb %xmm1, %xmm0
-; X64-NEXT:    pxor %xmm0, %xmm2
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT:    por %xmm2, %xmm0
-; X64-NEXT:    paddb %xmm1, %xmm1
-; X64-NEXT:    pxor %xmm2, %xmm2
-; X64-NEXT:    pcmpgtb %xmm1, %xmm2
-; X64-NEXT:    movdqa %xmm2, %xmm4
-; X64-NEXT:    pandn %xmm0, %xmm4
-; X64-NEXT:    psllw $2, %xmm0
-; X64-NEXT:    pand %xmm2, %xmm0
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; X64-NEXT:    pand %xmm0, %xmm4
+; X64-NEXT:    pandn %xmm3, %xmm0
+; X64-NEXT:    psllw $2, %xmm3
+; X64-NEXT:    pand %xmm3, %xmm4
 ; X64-NEXT:    por %xmm4, %xmm0
 ; X64-NEXT:    paddb %xmm1, %xmm1
-; X64-NEXT:    pcmpgtb %xmm1, %xmm3
-; X64-NEXT:    movdqa %xmm3, %xmm1
+; X64-NEXT:    pcmpgtb %xmm1, %xmm2
+; X64-NEXT:    movdqa %xmm2, %xmm1
 ; X64-NEXT:    pandn %xmm0, %xmm1
 ; X64-NEXT:    paddb %xmm0, %xmm0
-; X64-NEXT:    pand %xmm3, %xmm0
+; X64-NEXT:    pand %xmm2, %xmm0
 ; X64-NEXT:    por %xmm1, %xmm0
 ; X64-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index 81c4d5d71084c..8100bb82d5f69 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -834,7 +834,6 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-NO-BMI2-NO-SHLD-NEXT:    pushl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    subl $44, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
@@ -845,26 +844,27 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $3, %cl
 ; X86-NO-BMI2-NO-SHLD-NEXT:    andb $12, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebp), %edi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, (%ecx)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl $44, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %edi
@@ -908,11 +908,10 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
 ;
 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca_with_zero_upper_half:
 ; X86-HAVE-BMI2-NO-SHLD:       # %bb.0:
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $32, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -925,28 +924,25 @@ define void @load_8byte_chunk_of_16byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %edx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $3, %dl
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $12, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%edx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%esi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%esi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %ebx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $32, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    retl
   %init = load <8 x i8>, ptr %src, align 1
   %intermediate.sroa.0.0.vec.expand = shufflevector <8 x i8> %init, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1453,7 +1449,6 @@ define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-NO-BMI2-NO-SHLD-NEXT:    pushl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    subl $76, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movups (%ecx), %xmm0
 ; X86-NO-BMI2-NO-SHLD-NEXT:    xorps %xmm1, %xmm1
@@ -1464,26 +1459,27 @@ define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movaps %xmm0, (%esp)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebx,4), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebx,4), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebp,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebp,4), %edi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebx,4), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebp,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, (%ecx)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl $76, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %edi
@@ -1527,11 +1523,10 @@ define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
 ;
 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca_with_zero_upper_half:
 ; X86-HAVE-BMI2-NO-SHLD:       # %bb.0:
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $64, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -1544,28 +1539,25 @@ define void @load_8byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i6
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movaps %xmm0, (%esp)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %edx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $5, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%edx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%edx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%edx,4), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %ebx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $64, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    retl
   %init = load <16 x i8>, ptr %src, align 1
   %intermediate.sroa.0.0.vec.expand = shufflevector <16 x i8> %init, <16 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1716,39 +1708,40 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 16(%esp,%edi,4), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%edi,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 16(%esp,%ebx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ebx,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    xorb $31, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%edi,4), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edx,%edx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ebx,4), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%edi,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%edi,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%eax,4), %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -1826,38 +1819,34 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, 16(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ecx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ecx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %ebp, %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%ecx,4), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ecx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %ecx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %eax, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ebp, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %esi, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $92, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 8d36eef952a2b..70d47728ff478 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -1095,7 +1095,6 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    pushl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    subl $44, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movups (%ecx), %xmm0
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll $3, %eax
@@ -1105,26 +1104,27 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $3, %cl
 ; X86-NO-BMI2-NO-SHLD-NEXT:    andb $12, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebx), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebx), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebp), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebp), %edi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebp), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, (%ecx)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl $44, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %edi
@@ -1167,11 +1167,10 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ;
 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_16byte_alloca:
 ; X86-HAVE-BMI2-NO-SHLD:       # %bb.0:
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $32, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -1183,28 +1182,25 @@ define void @load_8byte_chunk_of_16byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %edx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $3, %dl
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $12, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%edx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%esi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%esi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %ebx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $44, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $32, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    retl
   %init = load <16 x i8>, ptr %src, align 1
   %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
@@ -1723,7 +1719,6 @@ define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    pushl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    subl $76, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movups (%ecx), %xmm0
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movups 16(%ecx), %xmm1
@@ -1735,26 +1730,27 @@ define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movaps %xmm0, (%esp)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebx,4), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebx,4), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl (%esp,%ebp,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%ebp,4), %edi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %al
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebx,4), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    addl %ebx, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, 4(%edx)
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, (%edx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%ebp,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, 4(%ecx)
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, (%ecx)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl $76, %esp
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    popl %edi
@@ -1799,11 +1795,10 @@ define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ;
 ; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_32byte_alloca:
 ; X86-HAVE-BMI2-NO-SHLD:       # %bb.0:
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %ebx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    subl $64, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %edx
@@ -1817,28 +1812,25 @@ define void @load_8byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movaps %xmm0, (%esp)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %edx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $5, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%edx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%edx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%edx,4), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, (%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 4(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %ecx, %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 8(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %ebx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $76, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $64, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    retl
   %init = load <32 x i8>, ptr %src, align 1
   %byteOff.numbits = shl nuw nsw i64 %byteOff, 3
@@ -1992,39 +1984,40 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %edi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 16(%esp,%edi,4), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%edi,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 16(%esp,%ebx,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ebx,4), %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    notb %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edx, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
+; X86-NO-BMI2-NO-SHLD-NEXT:    notb %dl
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    andb $24, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    xorb $31, %ch
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%edi,4), %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edx,%edx), %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ebx,4), %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%edi,%edi), %ebx
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %ebp, %ebx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %cl
-; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edx
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%edi,4), %esi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
+; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %edi
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %esi
 ; X86-NO-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edx, %ebp
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %al, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    orl %edi, %ebp
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shrl %cl, %esi
-; X86-NO-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%edi,4), %eax
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%eax,4), %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    addl %eax, %eax
-; X86-NO-BMI2-NO-SHLD-NEXT:    movb %ch, %cl
+; X86-NO-BMI2-NO-SHLD-NEXT:    movl %edx, %ecx
 ; X86-NO-BMI2-NO-SHLD-NEXT:    shll %cl, %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    orl %esi, %eax
 ; X86-NO-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %ecx
@@ -2104,38 +2097,34 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrb $5, %cl
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, 16(%esp,%ecx,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 20(%esp,%ecx,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ecx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%ebp,%ebp), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %ebp, %ebp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl 32(%esp,%ecx,4), %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl %ecx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    shlxl %ebx, %ecx, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    orl %eax, %ecx
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %ebp, 8(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %esi, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT:    movl %edx, (%eax)
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    addl $92, %esp
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %esi
 ; X86-HAVE-BMI2-NO-SHLD-NEXT:    popl %edi


