[llvm] [X86] Improve transform for add-like nodes to `add` (PR #83691)

via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 3 09:44:43 PST 2024


https://github.com/goldsteinn updated https://github.com/llvm/llvm-project/pull/83691

>From d1db5f3d995dadccea59e051e5ed7693956f4dd8 Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Sun, 3 Mar 2024 11:26:07 -0600
Subject: [PATCH 1/2] [X86] Regen 2009-05-23-dagcombine-shifts.ll test; NFC

---
 llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll b/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
index 609be3bb2e54f0..554fbc0eb58ef0 100644
--- a/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s | FileCheck %s
 
 ; Check that the shr(shl X, 56), 48) is not mistakenly turned into
@@ -16,11 +17,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 define i64 @foo(i64 %b) nounwind readnone {
-entry:
 ; CHECK-LABEL: foo:
-; CHECK: movsbq %dil, %rax
-; CHECK: shlq $8, %rax
-; CHECK: orq $1, %rax
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movsbq %dil, %rax
+; CHECK-NEXT:    shlq $8, %rax
+; CHECK-NEXT:    orq $1, %rax
+; CHECK-NEXT:    retq
+entry:
 	%shl = shl i64 %b, 56		; <i64> [#uses=1]
 	%shr = ashr i64 %shl, 48		; <i64> [#uses=1]
 	%add5 = or i64 %shr, 1		; <i64> [#uses=1]

>From 3a677d3204ed5039d9a6480d78b1b2023de235e8 Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Sat, 2 Mar 2024 14:47:08 -0600
Subject: [PATCH 2/2] [X86] Improve transform for add-like nodes to `add`

We previously did this only via TableGen patterns, but that approach
catches far fewer cases.
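
As a rough sketch of the kind of pattern this catches (adapted from the
existing `3addr-or.ll` tests updated below; attributes trimmed for
brevity):

    define void @test3(i32 %x, ptr %P) nounwind {
      %t0 = shl i32 %x, 5   ; low 5 bits of %t0 are known zero,
      %t1 = or i32 %t0, 3   ; so this `or` is disjoint, i.e. add-like
      store i32 %t1, ptr %P
      ret void
    }

Since `isADDLike` proves the operands share no set bits, the `or` can be
selected as `addl $3, %edi` instead of `orl $3, %edi`, and in other
cases the resulting `add` can fold into `lea` or `inc`.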
---
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp       |  14 +
 llvm/lib/Target/X86/X86InstrCompiler.td       |  33 +-
 .../X86/2009-05-23-dagcombine-shifts.ll       |   2 +-
 llvm/test/CodeGen/X86/3addr-or.ll             |   6 +-
 llvm/test/CodeGen/X86/addcarry2.ll            |   4 +-
 llvm/test/CodeGen/X86/and-or-fold.ll          |   4 +-
 llvm/test/CodeGen/X86/andimm8.ll              |   4 +-
 llvm/test/CodeGen/X86/atomic-unordered.ll     |   2 +-
 llvm/test/CodeGen/X86/avx512-calling-conv.ll  | 292 +++++++++---------
 .../test/CodeGen/X86/avx512-insert-extract.ll |  34 +-
 llvm/test/CodeGen/X86/avx512-vec-cmp.ll       |   8 +-
 .../CodeGen/X86/avx512vl-vec-masked-cmp.ll    |  64 ++--
 llvm/test/CodeGen/X86/bfloat.ll               | 128 ++++----
 .../test/CodeGen/X86/bitcast-and-setcc-256.ll |   4 +-
 .../test/CodeGen/X86/bitcast-and-setcc-512.ll |  22 +-
 llvm/test/CodeGen/X86/bitcast-setcc-128.ll    |   4 +-
 llvm/test/CodeGen/X86/bitcast-setcc-256.ll    |   8 +-
 llvm/test/CodeGen/X86/bitcast-setcc-512.ll    |  40 +--
 llvm/test/CodeGen/X86/bitcast-vector-bool.ll  |  82 ++---
 llvm/test/CodeGen/X86/bitreverse.ll           | 130 ++++----
 llvm/test/CodeGen/X86/bitselect.ll            |  23 +-
 llvm/test/CodeGen/X86/bool-math.ll            |  12 +-
 llvm/test/CodeGen/X86/bool-vector.ll          |  12 +-
 llvm/test/CodeGen/X86/bswap.ll                |   4 +-
 llvm/test/CodeGen/X86/bswap_tree2.ll          |  16 +-
 llvm/test/CodeGen/X86/buildvec-insertvec.ll   |   4 +-
 llvm/test/CodeGen/X86/clz.ll                  |  12 +-
 llvm/test/CodeGen/X86/combine-bitreverse.ll   |  36 +--
 llvm/test/CodeGen/X86/combine-bswap.ll        |   2 +-
 llvm/test/CodeGen/X86/combine-fneg.ll         |   8 +-
 llvm/test/CodeGen/X86/combine-rotates.ll      |   2 +-
 llvm/test/CodeGen/X86/commute-two-addr.ll     |  75 ++++-
 llvm/test/CodeGen/X86/dagcombine-select.ll    |   2 +-
 llvm/test/CodeGen/X86/dagcombine-shifts.ll    |   4 +-
 llvm/test/CodeGen/X86/disable-shrink-store.ll |   2 +-
 llvm/test/CodeGen/X86/extract-bits.ll         | 128 ++++----
 llvm/test/CodeGen/X86/fold-masked-merge.ll    |  48 +--
 llvm/test/CodeGen/X86/fp128-i128.ll           |   8 +-
 llvm/test/CodeGen/X86/fpenv.ll                |  26 +-
 .../test/CodeGen/X86/fptosi-sat-vector-128.ll |  34 +-
 .../test/CodeGen/X86/fptoui-sat-vector-128.ll |  34 +-
 llvm/test/CodeGen/X86/fshl.ll                 |  20 +-
 llvm/test/CodeGen/X86/fshr.ll                 |  22 +-
 llvm/test/CodeGen/X86/funnel-shift.ll         |   4 +-
 llvm/test/CodeGen/X86/half.ll                 |   6 +-
 .../CodeGen/X86/illegal-bitfield-loadstore.ll |  28 +-
 ...iller-impdef-on-implicit-def-regression.ll |   2 +-
 llvm/test/CodeGen/X86/insert.ll               |   4 +-
 llvm/test/CodeGen/X86/is_fpclass-fp80.ll      |   6 +-
 llvm/test/CodeGen/X86/is_fpclass.ll           |  12 +-
 llvm/test/CodeGen/X86/kshift.ll               |  16 +-
 llvm/test/CodeGen/X86/limited-prec.ll         |  18 +-
 llvm/test/CodeGen/X86/llvm.frexp.ll           |  28 +-
 llvm/test/CodeGen/X86/load-chain.ll           |   2 +-
 llvm/test/CodeGen/X86/load-combine.ll         |  87 +++---
 llvm/test/CodeGen/X86/load-local-v3i1.ll      |   8 +-
 llvm/test/CodeGen/X86/load-local-v3i129.ll    |   4 +-
 llvm/test/CodeGen/X86/load-local-v4i5.ll      |   6 +-
 llvm/test/CodeGen/X86/logic-shift.ll          |   4 +-
 llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll   |   8 +-
 llvm/test/CodeGen/X86/madd.ll                 |   6 +-
 llvm/test/CodeGen/X86/masked_compressstore.ll |   6 +-
 llvm/test/CodeGen/X86/masked_expandload.ll    |   6 +-
 llvm/test/CodeGen/X86/masked_load.ll          |   6 +-
 llvm/test/CodeGen/X86/masked_store.ll         |  18 +-
 llvm/test/CodeGen/X86/masked_store_trunc.ll   |   6 +-
 .../CodeGen/X86/masked_store_trunc_ssat.ll    |   6 +-
 .../CodeGen/X86/masked_store_trunc_usat.ll    |   6 +-
 llvm/test/CodeGen/X86/memset-inline.ll        |   2 +-
 llvm/test/CodeGen/X86/movmsk-cmp.ll           |   4 +-
 llvm/test/CodeGen/X86/mul128.ll               |   2 +-
 llvm/test/CodeGen/X86/no-wide-load.ll         |   2 +-
 llvm/test/CodeGen/X86/or-lea.ll               |   4 +-
 llvm/test/CodeGen/X86/pr20011.ll              |   4 +-
 llvm/test/CodeGen/X86/pr23664.ll              |   2 +-
 llvm/test/CodeGen/X86/pr27202.ll              |   2 +-
 llvm/test/CodeGen/X86/pr28173.ll              |   4 +-
 llvm/test/CodeGen/X86/pr35636.ll              |   4 +-
 llvm/test/CodeGen/X86/pr35763.ll              |   2 +-
 llvm/test/CodeGen/X86/pr43820.ll              |  32 +-
 llvm/test/CodeGen/X86/pr47299.ll              |  10 +-
 llvm/test/CodeGen/X86/pr62653.ll              |  74 ++---
 llvm/test/CodeGen/X86/pr69965.ll              |   4 +-
 llvm/test/CodeGen/X86/pr77459.ll              |  34 +-
 llvm/test/CodeGen/X86/promote-vec3.ll         |   2 +-
 llvm/test/CodeGen/X86/rev16.ll                |  10 +-
 llvm/test/CodeGen/X86/rotate-extract.ll       |  16 +-
 llvm/test/CodeGen/X86/select.ll               |   4 +-
 llvm/test/CodeGen/X86/select_const.ll         |   6 +-
 llvm/test/CodeGen/X86/setcc-fsh.ll            |   2 +-
 llvm/test/CodeGen/X86/shrink-compare-pgso.ll  |   2 +-
 llvm/test/CodeGen/X86/shrink-compare.ll       |   2 +-
 llvm/test/CodeGen/X86/smul_fix.ll             |   4 +-
 llvm/test/CodeGen/X86/smul_fix_sat.ll         |   4 +-
 llvm/test/CodeGen/X86/split-store.ll          |   4 +-
 .../CodeGen/X86/srem-seteq-vec-nonsplat.ll    |   4 +-
 .../subvectorwise-store-of-vector-splat.ll    | 104 +++----
 llvm/test/CodeGen/X86/umul_fix_sat.ll         |   4 +-
 ...asked-merge-scalar-constmask-innerouter.ll |   4 +-
 ...-merge-scalar-constmask-interleavedbits.ll |   4 +-
 ...-scalar-constmask-interleavedbytehalves.ll |   4 +-
 ...d-masked-merge-scalar-constmask-lowhigh.ll |  12 +-
 ...unfold-masked-merge-scalar-variablemask.ll |  69 +++--
 ...unfold-masked-merge-vector-variablemask.ll |  49 +--
 .../CodeGen/X86/urem-seteq-illegal-types.ll   |   6 +-
 llvm/test/CodeGen/X86/vector-bitreverse.ll    |  40 +--
 .../test/CodeGen/X86/vector-compare-all_of.ll |   4 +-
 .../CodeGen/X86/vector-compare-results.ll     |  70 ++---
 llvm/test/CodeGen/X86/vector-pcmp.ll          |   4 +-
 llvm/test/CodeGen/X86/vector-sext.ll          |   8 +-
 .../CodeGen/X86/vector-shuffle-128-v16.ll     |   4 +-
 llvm/test/CodeGen/X86/vector-shuffle-v1.ll    |   8 +-
 llvm/test/CodeGen/X86/vector-trunc.ll         |   2 +-
 llvm/test/CodeGen/X86/vector-zext.ll          |  12 +-
 llvm/test/CodeGen/X86/xor-lea.ll              |   4 +-
 115 files changed, 1241 insertions(+), 1139 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 5cbd9ab4dc2d6c..5329f37c4ee30f 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5295,6 +5295,20 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
     if (tryVPTERNLOG(Node))
       return;
 
+    // Convert add-like nodes to `add` before final selection, and do so
+    // before we drop flags like `disjoint`.
+    // NB: Conversion to `add` is preferable because it lets us use `lea`.
+    if (NVT.isScalarInteger() &&
+        (Opcode == ISD::OR ||
+         (NVT == MVT::i8 || NVT == MVT::i16 || NVT == MVT::i32)) &&
+        CurDAG->isADDLike(SDValue(Node, 0))) {
+      SDValue AsAdd = CurDAG->getNode(ISD::ADD, SDLoc(Node), NVT,
+                                      Node->getOperand(0), Node->getOperand(1));
+      ReplaceUses(SDValue(Node, 0), AsAdd);
+      CurDAG->RemoveDeadNode(Node);
+      Node = AsAdd.getNode();
+      Opcode = ISD::ADD;
+    }
     [[fallthrough]];
   case ISD::ADD:
     if (Opcode == ISD::ADD && matchBitExtract(Node))
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index f393f86e64aadd..ca36afd3b403e0 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1560,21 +1560,40 @@ let Predicates = [HasNDD] in {
 }
 
 // Depositing value to 8/16 bit subreg:
-def : Pat<(or (and GR64:$dst, -256), 
+def : Pat<(or (and GR64:$dst, -256),
               (i64 (zextloadi8 addr:$src))),
-          (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>; 
+          (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;
 
-def : Pat<(or (and GR32:$dst, -256), 
+def : Pat<(or (and GR32:$dst, -256),
               (i32 (zextloadi8 addr:$src))),
-          (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>; 
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;
 
-def : Pat<(or (and GR64:$dst, -65536), 
+def : Pat<(or (and GR64:$dst, -65536),
               (i64 (zextloadi16 addr:$src))),
           (INSERT_SUBREG (i64 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;
 
-def : Pat<(or (and GR32:$dst, -65536), 
+def : Pat<(or (and GR32:$dst, -65536),
               (i32 (zextloadi16 addr:$src))),
-          (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>; 
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;
+
+// The same patterns as above, but with `add` as the join operator. We
+// need to support `add` as well, since `or` can be converted to `add`
+// when the `or` is `disjoint` (as is the case in these patterns).
+def : Pat<(add (and GR64:$dst, -256),
+               (i64 (zextloadi8 addr:$src))),
+          (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;
+
+def : Pat<(add (and GR32:$dst, -256),
+               (i32 (zextloadi8 addr:$src))),
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;
+
+def : Pat<(add (and GR64:$dst, -65536),
+               (i64 (zextloadi16 addr:$src))),
+          (INSERT_SUBREG (i64 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;
+
+def : Pat<(add (and GR32:$dst, -65536),
+               (i32 (zextloadi16 addr:$src))),
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;
 
 // To avoid needing to materialize an immediate in a register, use a 32-bit and
 // with implicit zero-extension instead of a 64-bit and if the immediate has at
diff --git a/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll b/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
index 554fbc0eb58ef0..50e736ac68d29e 100644
--- a/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
@@ -21,7 +21,7 @@ define i64 @foo(i64 %b) nounwind readnone {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movsbq %dil, %rax
 ; CHECK-NEXT:    shlq $8, %rax
-; CHECK-NEXT:    orq $1, %rax
+; CHECK-NEXT:    incq %rax
 ; CHECK-NEXT:    retq
 entry:
 	%shl = shl i64 %b, 56		; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/3addr-or.ll b/llvm/test/CodeGen/X86/3addr-or.ll
index 65f6d2b4123e8e..1f466afcadc9ca 100644
--- a/llvm/test/CodeGen/X86/3addr-or.ll
+++ b/llvm/test/CodeGen/X86/3addr-or.ll
@@ -24,7 +24,7 @@ define i64 @test2(i8 %A, i8 %B) nounwind {
 ; CHECK-NEXT:    andl $48, %edi
 ; CHECK-NEXT:    movzbl %sil, %eax
 ; CHECK-NEXT:    shrl $4, %eax
-; CHECK-NEXT:    orl %edi, %eax
+; CHECK-NEXT:    addl %edi, %eax
 ; CHECK-NEXT:    retq
   %C = zext i8 %A to i64
   %D = shl i64 %C, 4
@@ -42,7 +42,7 @@ define void @test3(i32 %x, ptr %P) nounwind readnone ssp {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shll $5, %edi
-; CHECK-NEXT:    orl $3, %edi
+; CHECK-NEXT:    addl $3, %edi
 ; CHECK-NEXT:    movl %edi, (%rsi)
 ; CHECK-NEXT:    retq
   %t0 = shl i32 %x, 5
@@ -71,7 +71,7 @@ define void @test5(i32 %a, i32 %b, ptr nocapture %P) nounwind ssp {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andl $6, %edi
 ; CHECK-NEXT:    andl $16, %esi
-; CHECK-NEXT:    orl %edi, %esi
+; CHECK-NEXT:    addl %edi, %esi
 ; CHECK-NEXT:    movl %esi, (%rdx)
 ; CHECK-NEXT:    retq
   %and = and i32 %a, 6
diff --git a/llvm/test/CodeGen/X86/addcarry2.ll b/llvm/test/CodeGen/X86/addcarry2.ll
index 0338577dbddc2b..1a5d0f4fe45416 100644
--- a/llvm/test/CodeGen/X86/addcarry2.ll
+++ b/llvm/test/CodeGen/X86/addcarry2.ll
@@ -138,7 +138,7 @@ define void @adc_load_store_32_127(ptr inreg %x, ptr inreg %x2, i32 inreg %y) no
 ; X64-NEXT:    movl (%rdi), %eax # encoding: [0x8b,0x07]
 ; X64-NEXT:    shlq $32, %rax # encoding: [0x48,0xc1,0xe0,0x20]
 ; X64-NEXT:    movl %edx, %ecx # encoding: [0x89,0xd1]
-; X64-NEXT:    orq %rax, %rcx # encoding: [0x48,0x09,0xc1]
+; X64-NEXT:    addq %rax, %rcx # encoding: [0x48,0x01,0xc1]
 ; X64-NEXT:    movabsq $545460846593, %rax # encoding: [0x48,0xb8,0x01,0x00,0x00,0x00,0x7f,0x00,0x00,0x00]
 ; X64-NEXT:    # imm = 0x7F00000001
 ; X64-NEXT:    xorl %edx, %edx # encoding: [0x31,0xd2]
@@ -178,7 +178,7 @@ define void @adc_load_store_32_128(ptr inreg %x, ptr inreg %x2, i32 inreg %y) no
 ; X64-NEXT:    movl (%rdi), %eax # encoding: [0x8b,0x07]
 ; X64-NEXT:    shlq $32, %rax # encoding: [0x48,0xc1,0xe0,0x20]
 ; X64-NEXT:    movl %edx, %ecx # encoding: [0x89,0xd1]
-; X64-NEXT:    orq %rax, %rcx # encoding: [0x48,0x09,0xc1]
+; X64-NEXT:    addq %rax, %rcx # encoding: [0x48,0x01,0xc1]
 ; X64-NEXT:    movabsq $549755813889, %rax # encoding: [0x48,0xb8,0x01,0x00,0x00,0x00,0x80,0x00,0x00,0x00]
 ; X64-NEXT:    # imm = 0x8000000001
 ; X64-NEXT:    xorl %edx, %edx # encoding: [0x31,0xd2]
diff --git a/llvm/test/CodeGen/X86/and-or-fold.ll b/llvm/test/CodeGen/X86/and-or-fold.ll
index 1bb5fdeebac71c..4071b364a25c3b 100644
--- a/llvm/test/CodeGen/X86/and-or-fold.ll
+++ b/llvm/test/CodeGen/X86/and-or-fold.ll
@@ -45,7 +45,7 @@ define i32 @test1(i32 %x, i16 %y) {
 ; DARWIN-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; DARWIN-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; DARWIN-NEXT:    shll $16, %eax
-; DARWIN-NEXT:    orl %ecx, %eax
+; DARWIN-NEXT:    addl %ecx, %eax
 ; DARWIN-NEXT:    andl $16711807, %eax ## imm = 0xFF007F
 ; DARWIN-NEXT:    retl
 ;
@@ -54,7 +54,7 @@ define i32 @test1(i32 %x, i16 %y) {
 ; DARWIN-OPT-NEXT:    andl $127, %esi
 ; DARWIN-OPT-NEXT:    movzbl %dil, %eax
 ; DARWIN-OPT-NEXT:    shll $16, %eax
-; DARWIN-OPT-NEXT:    orl %esi, %eax
+; DARWIN-OPT-NEXT:    addl %esi, %eax
 ; DARWIN-OPT-NEXT:    retq
   %tmp1 = zext i16 %y to i32
   %tmp2 = and i32 %tmp1, 127
diff --git a/llvm/test/CodeGen/X86/andimm8.ll b/llvm/test/CodeGen/X86/andimm8.ll
index 6242d4f4c222bb..506e28300e71b0 100644
--- a/llvm/test/CodeGen/X86/andimm8.ll
+++ b/llvm/test/CodeGen/X86/andimm8.ll
@@ -29,7 +29,7 @@ define void @foo(i64 %zed, ptr %x) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx # encoding: [0x8b,0x54,0x24,0x08]
 ; X86-NEXT:    andl $-4, %ecx # encoding: [0x83,0xe1,0xfc]
-; X86-NEXT:    orl $2, %ecx # encoding: [0x83,0xc9,0x02]
+; X86-NEXT:    addl $2, %ecx # encoding: [0x83,0xc1,0x02]
 ; X86-NEXT:    movl %edx, 4(%eax) # encoding: [0x89,0x50,0x04]
 ; X86-NEXT:    movl %ecx, (%eax) # encoding: [0x89,0x08]
 ; X86-NEXT:    retl # encoding: [0xc3]
@@ -37,7 +37,7 @@ define void @foo(i64 %zed, ptr %x) nounwind {
 ; X64-LABEL: foo:
 ; X64:       # %bb.0:
 ; X64-NEXT:    andq $-4, %rdi # encoding: [0x48,0x83,0xe7,0xfc]
-; X64-NEXT:    orq $2, %rdi # encoding: [0x48,0x83,0xcf,0x02]
+; X64-NEXT:    addq $2, %rdi # encoding: [0x48,0x83,0xc7,0x02]
 ; X64-NEXT:    movq %rdi, (%rsi) # encoding: [0x48,0x89,0x3e]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %t1 = and i64 %zed, -4
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index df123be53474f0..903951dd5a8cff 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -2359,7 +2359,7 @@ define i16 @load_combine(ptr %p) {
 ; CHECK-O3-NEXT:    movzbl (%rdi), %ecx
 ; CHECK-O3-NEXT:    movzbl 1(%rdi), %eax
 ; CHECK-O3-NEXT:    shll $8, %eax
-; CHECK-O3-NEXT:    orl %ecx, %eax
+; CHECK-O3-NEXT:    addl %ecx, %eax
 ; CHECK-O3-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-O3-NEXT:    retq
   %v1 = load atomic i8, ptr %p unordered, align 2
diff --git a/llvm/test/CodeGen/X86/avx512-calling-conv.ll b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
index b39b089faa2a5e..b4c37a2e34d95d 100644
--- a/llvm/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
@@ -910,13 +910,13 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL-NEXT:    kandw %k2, %k0, %k0
 ; KNL-NEXT:    kmovw %r10d, %k2
 ; KNL-NEXT:    kandw %k1, %k2, %k1
-; KNL-NEXT:    kmovw %k1, %edx
+; KNL-NEXT:    kmovw %k1, %esi
 ; KNL-NEXT:    kshiftrw $1, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %r9d
 ; KNL-NEXT:    kshiftrw $2, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %r8d
 ; KNL-NEXT:    kshiftrw $3, %k0, %k1
-; KNL-NEXT:    kmovw %k1, %esi
+; KNL-NEXT:    kmovw %k1, %edx
 ; KNL-NEXT:    kshiftrw $4, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %edi
 ; KNL-NEXT:    kshiftrw $5, %k0, %k1
@@ -928,9 +928,9 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL-NEXT:    kshiftrw $8, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %ebp
 ; KNL-NEXT:    kshiftrw $9, %k0, %k1
-; KNL-NEXT:    kmovw %k1, %r14d
-; KNL-NEXT:    kshiftrw $10, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %r11d
+; KNL-NEXT:    kshiftrw $10, %k0, %k1
+; KNL-NEXT:    kmovw %k1, %r14d
 ; KNL-NEXT:    kshiftrw $11, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %r15d
 ; KNL-NEXT:    kshiftrw $12, %k0, %k1
@@ -938,25 +938,25 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL-NEXT:    kshiftrw $13, %k0, %k1
 ; KNL-NEXT:    kmovw %k1, %r13d
 ; KNL-NEXT:    kshiftrw $14, %k0, %k1
-; KNL-NEXT:    andl $1, %edx
-; KNL-NEXT:    movb %dl, 2(%rax)
-; KNL-NEXT:    kmovw %k0, %edx
-; KNL-NEXT:    andl $1, %edx
+; KNL-NEXT:    andl $1, %esi
+; KNL-NEXT:    movb %sil, 2(%rax)
+; KNL-NEXT:    kmovw %k0, %esi
+; KNL-NEXT:    andl $1, %esi
 ; KNL-NEXT:    andl $1, %r9d
-; KNL-NEXT:    leal (%rdx,%r9,2), %r9d
-; KNL-NEXT:    kmovw %k1, %edx
+; KNL-NEXT:    leal (%rsi,%r9,2), %r9d
+; KNL-NEXT:    kmovw %k1, %esi
 ; KNL-NEXT:    kshiftrw $15, %k0, %k0
 ; KNL-NEXT:    andl $1, %r8d
 ; KNL-NEXT:    leal (%r9,%r8,4), %r9d
 ; KNL-NEXT:    kmovw %k0, %r8d
-; KNL-NEXT:    andl $1, %esi
-; KNL-NEXT:    leal (%r9,%rsi,8), %esi
+; KNL-NEXT:    andl $1, %edx
+; KNL-NEXT:    leal (%r9,%rdx,8), %edx
 ; KNL-NEXT:    andl $1, %edi
 ; KNL-NEXT:    shll $4, %edi
-; KNL-NEXT:    orl %esi, %edi
 ; KNL-NEXT:    andl $1, %ecx
 ; KNL-NEXT:    shll $5, %ecx
-; KNL-NEXT:    orl %edi, %ecx
+; KNL-NEXT:    addl %edi, %ecx
+; KNL-NEXT:    addl %edx, %ecx
 ; KNL-NEXT:    andl $1, %r10d
 ; KNL-NEXT:    shll $6, %r10d
 ; KNL-NEXT:    andl $1, %ebx
@@ -965,28 +965,28 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL-NEXT:    andl $1, %ebp
 ; KNL-NEXT:    shll $8, %ebp
 ; KNL-NEXT:    orl %ebx, %ebp
-; KNL-NEXT:    andl $1, %r14d
-; KNL-NEXT:    shll $9, %r14d
-; KNL-NEXT:    orl %ebp, %r14d
 ; KNL-NEXT:    andl $1, %r11d
-; KNL-NEXT:    shll $10, %r11d
-; KNL-NEXT:    orl %r14d, %r11d
+; KNL-NEXT:    shll $9, %r11d
+; KNL-NEXT:    orl %ebp, %r11d
 ; KNL-NEXT:    orl %ecx, %r11d
+; KNL-NEXT:    andl $1, %r14d
+; KNL-NEXT:    shll $10, %r14d
 ; KNL-NEXT:    andl $1, %r15d
 ; KNL-NEXT:    shll $11, %r15d
+; KNL-NEXT:    orl %r14d, %r15d
 ; KNL-NEXT:    andl $1, %r12d
 ; KNL-NEXT:    shll $12, %r12d
 ; KNL-NEXT:    orl %r15d, %r12d
 ; KNL-NEXT:    andl $1, %r13d
 ; KNL-NEXT:    shll $13, %r13d
 ; KNL-NEXT:    orl %r12d, %r13d
-; KNL-NEXT:    andl $1, %edx
-; KNL-NEXT:    shll $14, %edx
-; KNL-NEXT:    orl %r13d, %edx
+; KNL-NEXT:    andl $1, %esi
+; KNL-NEXT:    shll $14, %esi
+; KNL-NEXT:    orl %r13d, %esi
+; KNL-NEXT:    orl %r11d, %esi
 ; KNL-NEXT:    andl $1, %r8d
 ; KNL-NEXT:    shll $15, %r8d
-; KNL-NEXT:    orl %edx, %r8d
-; KNL-NEXT:    orl %r11d, %r8d
+; KNL-NEXT:    orl %esi, %r8d
 ; KNL-NEXT:    movw %r8w, (%rax)
 ; KNL-NEXT:    popq %rbx
 ; KNL-NEXT:    popq %r12
@@ -1223,13 +1223,13 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; SKX-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 ## 4-byte Reload
 ; SKX-NEXT:    kandd %k1, %k0, %k0
 ; SKX-NEXT:    kshiftrd $16, %k0, %k1
-; SKX-NEXT:    kmovd %k1, %edx
+; SKX-NEXT:    kmovd %k1, %esi
 ; SKX-NEXT:    kshiftrd $1, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %r9d
 ; SKX-NEXT:    kshiftrd $2, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %r8d
 ; SKX-NEXT:    kshiftrd $3, %k0, %k1
-; SKX-NEXT:    kmovd %k1, %esi
+; SKX-NEXT:    kmovd %k1, %edx
 ; SKX-NEXT:    kshiftrd $4, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %edi
 ; SKX-NEXT:    kshiftrd $5, %k0, %k1
@@ -1241,9 +1241,9 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; SKX-NEXT:    kshiftrd $8, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %ebp
 ; SKX-NEXT:    kshiftrd $9, %k0, %k1
-; SKX-NEXT:    kmovd %k1, %r14d
-; SKX-NEXT:    kshiftrd $10, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %r11d
+; SKX-NEXT:    kshiftrd $10, %k0, %k1
+; SKX-NEXT:    kmovd %k1, %r14d
 ; SKX-NEXT:    kshiftrd $11, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %r15d
 ; SKX-NEXT:    kshiftrd $12, %k0, %k1
@@ -1251,25 +1251,25 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; SKX-NEXT:    kshiftrd $13, %k0, %k1
 ; SKX-NEXT:    kmovd %k1, %r13d
 ; SKX-NEXT:    kshiftrd $14, %k0, %k1
-; SKX-NEXT:    andl $1, %edx
-; SKX-NEXT:    movb %dl, 2(%rax)
-; SKX-NEXT:    kmovd %k0, %edx
-; SKX-NEXT:    andl $1, %edx
+; SKX-NEXT:    andl $1, %esi
+; SKX-NEXT:    movb %sil, 2(%rax)
+; SKX-NEXT:    kmovd %k0, %esi
+; SKX-NEXT:    andl $1, %esi
 ; SKX-NEXT:    andl $1, %r9d
-; SKX-NEXT:    leal (%rdx,%r9,2), %r9d
-; SKX-NEXT:    kmovd %k1, %edx
+; SKX-NEXT:    leal (%rsi,%r9,2), %r9d
+; SKX-NEXT:    kmovd %k1, %esi
 ; SKX-NEXT:    kshiftrd $15, %k0, %k0
 ; SKX-NEXT:    andl $1, %r8d
 ; SKX-NEXT:    leal (%r9,%r8,4), %r9d
 ; SKX-NEXT:    kmovd %k0, %r8d
-; SKX-NEXT:    andl $1, %esi
-; SKX-NEXT:    leal (%r9,%rsi,8), %esi
+; SKX-NEXT:    andl $1, %edx
+; SKX-NEXT:    leal (%r9,%rdx,8), %edx
 ; SKX-NEXT:    andl $1, %edi
 ; SKX-NEXT:    shll $4, %edi
-; SKX-NEXT:    orl %esi, %edi
 ; SKX-NEXT:    andl $1, %ecx
 ; SKX-NEXT:    shll $5, %ecx
-; SKX-NEXT:    orl %edi, %ecx
+; SKX-NEXT:    addl %edi, %ecx
+; SKX-NEXT:    addl %edx, %ecx
 ; SKX-NEXT:    andl $1, %r10d
 ; SKX-NEXT:    shll $6, %r10d
 ; SKX-NEXT:    andl $1, %ebx
@@ -1278,28 +1278,28 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; SKX-NEXT:    andl $1, %ebp
 ; SKX-NEXT:    shll $8, %ebp
 ; SKX-NEXT:    orl %ebx, %ebp
-; SKX-NEXT:    andl $1, %r14d
-; SKX-NEXT:    shll $9, %r14d
-; SKX-NEXT:    orl %ebp, %r14d
 ; SKX-NEXT:    andl $1, %r11d
-; SKX-NEXT:    shll $10, %r11d
-; SKX-NEXT:    orl %r14d, %r11d
+; SKX-NEXT:    shll $9, %r11d
+; SKX-NEXT:    orl %ebp, %r11d
 ; SKX-NEXT:    orl %ecx, %r11d
+; SKX-NEXT:    andl $1, %r14d
+; SKX-NEXT:    shll $10, %r14d
 ; SKX-NEXT:    andl $1, %r15d
 ; SKX-NEXT:    shll $11, %r15d
+; SKX-NEXT:    orl %r14d, %r15d
 ; SKX-NEXT:    andl $1, %r12d
 ; SKX-NEXT:    shll $12, %r12d
 ; SKX-NEXT:    orl %r15d, %r12d
 ; SKX-NEXT:    andl $1, %r13d
 ; SKX-NEXT:    shll $13, %r13d
 ; SKX-NEXT:    orl %r12d, %r13d
-; SKX-NEXT:    andl $1, %edx
-; SKX-NEXT:    shll $14, %edx
-; SKX-NEXT:    orl %r13d, %edx
+; SKX-NEXT:    andl $1, %esi
+; SKX-NEXT:    shll $14, %esi
+; SKX-NEXT:    orl %r13d, %esi
+; SKX-NEXT:    orl %r11d, %esi
 ; SKX-NEXT:    andl $1, %r8d
 ; SKX-NEXT:    shll $15, %r8d
-; SKX-NEXT:    orl %edx, %r8d
-; SKX-NEXT:    orl %r11d, %r8d
+; SKX-NEXT:    orl %esi, %r8d
 ; SKX-NEXT:    movw %r8w, (%rax)
 ; SKX-NEXT:    popq %rbx
 ; SKX-NEXT:    popq %r12
@@ -1556,9 +1556,9 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL_X32-NEXT:    kshiftrw $1, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %ebp
 ; KNL_X32-NEXT:    kshiftrw $2, %k0, %k1
-; KNL_X32-NEXT:    kmovw %k1, %esi
-; KNL_X32-NEXT:    kshiftrw $3, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %edi
+; KNL_X32-NEXT:    kshiftrw $3, %k0, %k1
+; KNL_X32-NEXT:    kmovw %k1, %esi
 ; KNL_X32-NEXT:    kshiftrw $4, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %edx
 ; KNL_X32-NEXT:    kshiftrw $5, %k0, %k1
@@ -1569,67 +1569,67 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; KNL_X32-NEXT:    kmovw %k0, %ebx
 ; KNL_X32-NEXT:    andl $1, %ebx
 ; KNL_X32-NEXT:    andl $1, %ebp
-; KNL_X32-NEXT:    leal (%ebx,%ebp,2), %ebx
-; KNL_X32-NEXT:    kmovw %k1, %ebp
+; KNL_X32-NEXT:    leal (%ebx,%ebp,2), %ebp
+; KNL_X32-NEXT:    kmovw %k1, %ebx
 ; KNL_X32-NEXT:    kshiftrw $7, %k0, %k1
-; KNL_X32-NEXT:    andl $1, %esi
-; KNL_X32-NEXT:    leal (%ebx,%esi,4), %ebx
-; KNL_X32-NEXT:    kmovw %k1, %esi
-; KNL_X32-NEXT:    kshiftrw $8, %k0, %k1
 ; KNL_X32-NEXT:    andl $1, %edi
-; KNL_X32-NEXT:    leal (%ebx,%edi,8), %ebx
+; KNL_X32-NEXT:    leal (%ebp,%edi,4), %ebp
 ; KNL_X32-NEXT:    kmovw %k1, %edi
+; KNL_X32-NEXT:    kshiftrw $8, %k0, %k1
+; KNL_X32-NEXT:    andl $1, %esi
+; KNL_X32-NEXT:    leal (%ebp,%esi,8), %ebp
+; KNL_X32-NEXT:    kmovw %k1, %esi
 ; KNL_X32-NEXT:    kshiftrw $9, %k0, %k1
 ; KNL_X32-NEXT:    andl $1, %edx
 ; KNL_X32-NEXT:    shll $4, %edx
-; KNL_X32-NEXT:    orl %ebx, %edx
-; KNL_X32-NEXT:    kmovw %k1, %ebx
-; KNL_X32-NEXT:    kshiftrw $10, %k0, %k1
 ; KNL_X32-NEXT:    andl $1, %ecx
 ; KNL_X32-NEXT:    shll $5, %ecx
-; KNL_X32-NEXT:    orl %edx, %ecx
+; KNL_X32-NEXT:    addl %edx, %ecx
 ; KNL_X32-NEXT:    kmovw %k1, %edx
-; KNL_X32-NEXT:    kshiftrw $11, %k0, %k1
-; KNL_X32-NEXT:    andl $1, %ebp
-; KNL_X32-NEXT:    shll $6, %ebp
-; KNL_X32-NEXT:    andl $1, %esi
-; KNL_X32-NEXT:    shll $7, %esi
-; KNL_X32-NEXT:    orl %ebp, %esi
+; KNL_X32-NEXT:    kshiftrw $10, %k0, %k1
+; KNL_X32-NEXT:    addl %ebp, %ecx
 ; KNL_X32-NEXT:    kmovw %k1, %ebp
-; KNL_X32-NEXT:    kshiftrw $12, %k0, %k1
-; KNL_X32-NEXT:    andl $1, %edi
-; KNL_X32-NEXT:    shll $8, %edi
-; KNL_X32-NEXT:    orl %esi, %edi
-; KNL_X32-NEXT:    kmovw %k1, %esi
-; KNL_X32-NEXT:    kshiftrw $13, %k0, %k1
+; KNL_X32-NEXT:    kshiftrw $11, %k0, %k1
 ; KNL_X32-NEXT:    andl $1, %ebx
-; KNL_X32-NEXT:    shll $9, %ebx
-; KNL_X32-NEXT:    orl %edi, %ebx
+; KNL_X32-NEXT:    shll $6, %ebx
+; KNL_X32-NEXT:    andl $1, %edi
+; KNL_X32-NEXT:    shll $7, %edi
+; KNL_X32-NEXT:    orl %ebx, %edi
+; KNL_X32-NEXT:    kmovw %k1, %ebx
+; KNL_X32-NEXT:    kshiftrw $12, %k0, %k1
+; KNL_X32-NEXT:    andl $1, %esi
+; KNL_X32-NEXT:    shll $8, %esi
+; KNL_X32-NEXT:    orl %edi, %esi
 ; KNL_X32-NEXT:    kmovw %k1, %edi
-; KNL_X32-NEXT:    kshiftrw $14, %k0, %k1
+; KNL_X32-NEXT:    kshiftrw $13, %k0, %k1
 ; KNL_X32-NEXT:    andl $1, %edx
-; KNL_X32-NEXT:    shll $10, %edx
-; KNL_X32-NEXT:    orl %ebx, %edx
-; KNL_X32-NEXT:    kmovw %k1, %ebx
-; KNL_X32-NEXT:    kshiftrw $15, %k0, %k0
+; KNL_X32-NEXT:    shll $9, %edx
+; KNL_X32-NEXT:    orl %esi, %edx
+; KNL_X32-NEXT:    kmovw %k1, %esi
+; KNL_X32-NEXT:    kshiftrw $14, %k0, %k1
 ; KNL_X32-NEXT:    orl %ecx, %edx
-; KNL_X32-NEXT:    kmovw %k0, %ecx
+; KNL_X32-NEXT:    kmovw %k1, %ecx
+; KNL_X32-NEXT:    kshiftrw $15, %k0, %k0
 ; KNL_X32-NEXT:    andl $1, %ebp
-; KNL_X32-NEXT:    shll $11, %ebp
-; KNL_X32-NEXT:    andl $1, %esi
-; KNL_X32-NEXT:    shll $12, %esi
-; KNL_X32-NEXT:    orl %ebp, %esi
-; KNL_X32-NEXT:    andl $1, %edi
-; KNL_X32-NEXT:    shll $13, %edi
-; KNL_X32-NEXT:    orl %esi, %edi
+; KNL_X32-NEXT:    shll $10, %ebp
 ; KNL_X32-NEXT:    andl $1, %ebx
-; KNL_X32-NEXT:    shll $14, %ebx
-; KNL_X32-NEXT:    orl %edi, %ebx
+; KNL_X32-NEXT:    shll $11, %ebx
+; KNL_X32-NEXT:    orl %ebp, %ebx
+; KNL_X32-NEXT:    kmovw %k0, %ebp
+; KNL_X32-NEXT:    andl $1, %edi
+; KNL_X32-NEXT:    shll $12, %edi
+; KNL_X32-NEXT:    orl %ebx, %edi
+; KNL_X32-NEXT:    andl $1, %esi
+; KNL_X32-NEXT:    shll $13, %esi
+; KNL_X32-NEXT:    orl %edi, %esi
 ; KNL_X32-NEXT:    andl $1, %ecx
-; KNL_X32-NEXT:    shll $15, %ecx
-; KNL_X32-NEXT:    orl %ebx, %ecx
+; KNL_X32-NEXT:    shll $14, %ecx
+; KNL_X32-NEXT:    orl %esi, %ecx
 ; KNL_X32-NEXT:    orl %edx, %ecx
-; KNL_X32-NEXT:    movw %cx, (%eax)
+; KNL_X32-NEXT:    andl $1, %ebp
+; KNL_X32-NEXT:    shll $15, %ebp
+; KNL_X32-NEXT:    orl %ecx, %ebp
+; KNL_X32-NEXT:    movw %bp, (%eax)
 ; KNL_X32-NEXT:    addl $16, %esp
 ; KNL_X32-NEXT:    popl %esi
 ; KNL_X32-NEXT:    popl %edi
@@ -1864,13 +1864,13 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; FASTISEL-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 ## 4-byte Reload
 ; FASTISEL-NEXT:    kandd %k1, %k0, %k0
 ; FASTISEL-NEXT:    kshiftrd $16, %k0, %k1
-; FASTISEL-NEXT:    kmovd %k1, %edx
+; FASTISEL-NEXT:    kmovd %k1, %esi
 ; FASTISEL-NEXT:    kshiftrd $1, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %r9d
 ; FASTISEL-NEXT:    kshiftrd $2, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %r8d
 ; FASTISEL-NEXT:    kshiftrd $3, %k0, %k1
-; FASTISEL-NEXT:    kmovd %k1, %esi
+; FASTISEL-NEXT:    kmovd %k1, %edx
 ; FASTISEL-NEXT:    kshiftrd $4, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %edi
 ; FASTISEL-NEXT:    kshiftrd $5, %k0, %k1
@@ -1882,9 +1882,9 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; FASTISEL-NEXT:    kshiftrd $8, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %ebp
 ; FASTISEL-NEXT:    kshiftrd $9, %k0, %k1
-; FASTISEL-NEXT:    kmovd %k1, %r14d
-; FASTISEL-NEXT:    kshiftrd $10, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %r11d
+; FASTISEL-NEXT:    kshiftrd $10, %k0, %k1
+; FASTISEL-NEXT:    kmovd %k1, %r14d
 ; FASTISEL-NEXT:    kshiftrd $11, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %r15d
 ; FASTISEL-NEXT:    kshiftrd $12, %k0, %k1
@@ -1892,25 +1892,25 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; FASTISEL-NEXT:    kshiftrd $13, %k0, %k1
 ; FASTISEL-NEXT:    kmovd %k1, %r13d
 ; FASTISEL-NEXT:    kshiftrd $14, %k0, %k1
-; FASTISEL-NEXT:    andl $1, %edx
-; FASTISEL-NEXT:    movb %dl, 2(%rax)
-; FASTISEL-NEXT:    kmovd %k0, %edx
-; FASTISEL-NEXT:    andl $1, %edx
+; FASTISEL-NEXT:    andl $1, %esi
+; FASTISEL-NEXT:    movb %sil, 2(%rax)
+; FASTISEL-NEXT:    kmovd %k0, %esi
+; FASTISEL-NEXT:    andl $1, %esi
 ; FASTISEL-NEXT:    andl $1, %r9d
-; FASTISEL-NEXT:    leal (%rdx,%r9,2), %r9d
-; FASTISEL-NEXT:    kmovd %k1, %edx
+; FASTISEL-NEXT:    leal (%rsi,%r9,2), %r9d
+; FASTISEL-NEXT:    kmovd %k1, %esi
 ; FASTISEL-NEXT:    kshiftrd $15, %k0, %k0
 ; FASTISEL-NEXT:    andl $1, %r8d
 ; FASTISEL-NEXT:    leal (%r9,%r8,4), %r9d
 ; FASTISEL-NEXT:    kmovd %k0, %r8d
-; FASTISEL-NEXT:    andl $1, %esi
-; FASTISEL-NEXT:    leal (%r9,%rsi,8), %esi
+; FASTISEL-NEXT:    andl $1, %edx
+; FASTISEL-NEXT:    leal (%r9,%rdx,8), %edx
 ; FASTISEL-NEXT:    andl $1, %edi
 ; FASTISEL-NEXT:    shll $4, %edi
-; FASTISEL-NEXT:    orl %esi, %edi
 ; FASTISEL-NEXT:    andl $1, %ecx
 ; FASTISEL-NEXT:    shll $5, %ecx
-; FASTISEL-NEXT:    orl %edi, %ecx
+; FASTISEL-NEXT:    addl %edi, %ecx
+; FASTISEL-NEXT:    addl %edx, %ecx
 ; FASTISEL-NEXT:    andl $1, %r10d
 ; FASTISEL-NEXT:    shll $6, %r10d
 ; FASTISEL-NEXT:    andl $1, %ebx
@@ -1919,28 +1919,28 @@ define <17 x i1> @test16(<17 x i1> %a, <17 x i1> %b) nounwind {
 ; FASTISEL-NEXT:    andl $1, %ebp
 ; FASTISEL-NEXT:    shll $8, %ebp
 ; FASTISEL-NEXT:    orl %ebx, %ebp
-; FASTISEL-NEXT:    andl $1, %r14d
-; FASTISEL-NEXT:    shll $9, %r14d
-; FASTISEL-NEXT:    orl %ebp, %r14d
 ; FASTISEL-NEXT:    andl $1, %r11d
-; FASTISEL-NEXT:    shll $10, %r11d
-; FASTISEL-NEXT:    orl %r14d, %r11d
+; FASTISEL-NEXT:    shll $9, %r11d
+; FASTISEL-NEXT:    orl %ebp, %r11d
 ; FASTISEL-NEXT:    orl %ecx, %r11d
+; FASTISEL-NEXT:    andl $1, %r14d
+; FASTISEL-NEXT:    shll $10, %r14d
 ; FASTISEL-NEXT:    andl $1, %r15d
 ; FASTISEL-NEXT:    shll $11, %r15d
+; FASTISEL-NEXT:    orl %r14d, %r15d
 ; FASTISEL-NEXT:    andl $1, %r12d
 ; FASTISEL-NEXT:    shll $12, %r12d
 ; FASTISEL-NEXT:    orl %r15d, %r12d
 ; FASTISEL-NEXT:    andl $1, %r13d
 ; FASTISEL-NEXT:    shll $13, %r13d
 ; FASTISEL-NEXT:    orl %r12d, %r13d
-; FASTISEL-NEXT:    andl $1, %edx
-; FASTISEL-NEXT:    shll $14, %edx
-; FASTISEL-NEXT:    orl %r13d, %edx
+; FASTISEL-NEXT:    andl $1, %esi
+; FASTISEL-NEXT:    shll $14, %esi
+; FASTISEL-NEXT:    orl %r13d, %esi
+; FASTISEL-NEXT:    orl %r11d, %esi
 ; FASTISEL-NEXT:    andl $1, %r8d
 ; FASTISEL-NEXT:    shll $15, %r8d
-; FASTISEL-NEXT:    orl %edx, %r8d
-; FASTISEL-NEXT:    orl %r11d, %r8d
+; FASTISEL-NEXT:    orl %esi, %r8d
 ; FASTISEL-NEXT:    movw %r8w, (%rax)
 ; FASTISEL-NEXT:    popq %rbx
 ; FASTISEL-NEXT:    popq %r12
@@ -2342,19 +2342,19 @@ define <7 x i1> @test17(<7 x i1> %a, <7 x i1> %b, <7 x i1> %c, <7 x i1> %d, <7 x
 ; KNL-NEXT:    andb $1, %r10b
 ; KNL-NEXT:    andb $1, %r9b
 ; KNL-NEXT:    addb %r9b, %r9b
-; KNL-NEXT:    orb %r10b, %r9b
+; KNL-NEXT:    addb %r10b, %r9b
 ; KNL-NEXT:    andb $1, %r8b
 ; KNL-NEXT:    shlb $2, %r8b
-; KNL-NEXT:    orb %r9b, %r8b
 ; KNL-NEXT:    andb $1, %dil
 ; KNL-NEXT:    shlb $3, %dil
-; KNL-NEXT:    orb %r8b, %dil
+; KNL-NEXT:    addb %r8b, %dil
+; KNL-NEXT:    addb %r9b, %dil
 ; KNL-NEXT:    andb $1, %sil
 ; KNL-NEXT:    shlb $4, %sil
-; KNL-NEXT:    orb %dil, %sil
 ; KNL-NEXT:    andb $1, %dl
 ; KNL-NEXT:    shlb $5, %dl
-; KNL-NEXT:    orb %sil, %dl
+; KNL-NEXT:    addb %sil, %dl
+; KNL-NEXT:    addb %dil, %dl
 ; KNL-NEXT:    shlb $6, %cl
 ; KNL-NEXT:    orb %dl, %cl
 ; KNL-NEXT:    andb $127, %cl
@@ -2710,19 +2710,19 @@ define <7 x i1> @test17(<7 x i1> %a, <7 x i1> %b, <7 x i1> %c, <7 x i1> %d, <7 x
 ; SKX-NEXT:    andb $1, %r10b
 ; SKX-NEXT:    andb $1, %r9b
 ; SKX-NEXT:    addb %r9b, %r9b
-; SKX-NEXT:    orb %r10b, %r9b
+; SKX-NEXT:    addb %r10b, %r9b
 ; SKX-NEXT:    andb $1, %r8b
 ; SKX-NEXT:    shlb $2, %r8b
-; SKX-NEXT:    orb %r9b, %r8b
 ; SKX-NEXT:    andb $1, %dil
 ; SKX-NEXT:    shlb $3, %dil
-; SKX-NEXT:    orb %r8b, %dil
+; SKX-NEXT:    addb %r8b, %dil
+; SKX-NEXT:    addb %r9b, %dil
 ; SKX-NEXT:    andb $1, %sil
 ; SKX-NEXT:    shlb $4, %sil
-; SKX-NEXT:    orb %dil, %sil
 ; SKX-NEXT:    andb $1, %dl
 ; SKX-NEXT:    shlb $5, %dl
-; SKX-NEXT:    orb %sil, %dl
+; SKX-NEXT:    addb %sil, %dl
+; SKX-NEXT:    addb %dil, %dl
 ; SKX-NEXT:    shlb $6, %cl
 ; SKX-NEXT:    orb %dl, %cl
 ; SKX-NEXT:    andb $127, %cl
@@ -3109,7 +3109,7 @@ define <7 x i1> @test17(<7 x i1> %a, <7 x i1> %b, <7 x i1> %c, <7 x i1> %d, <7 x
 ; KNL_X32-NEXT:    kandw %k1, %k0, %k0
 ; KNL_X32-NEXT:    kshiftrw $6, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %ecx
-; KNL_X32-NEXT:    kshiftrw $5, %k0, %k1
+; KNL_X32-NEXT:    kshiftrw $3, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %eax
 ; KNL_X32-NEXT:    kshiftrw $1, %k0, %k1
 ; KNL_X32-NEXT:    kmovw %k1, %edx
@@ -3118,26 +3118,26 @@ define <7 x i1> @test17(<7 x i1> %a, <7 x i1> %b, <7 x i1> %c, <7 x i1> %d, <7 x
 ; KNL_X32-NEXT:    andb $1, %bl
 ; KNL_X32-NEXT:    andb $1, %dl
 ; KNL_X32-NEXT:    addb %dl, %dl
-; KNL_X32-NEXT:    orb %bl, %dl
+; KNL_X32-NEXT:    addb %bl, %dl
 ; KNL_X32-NEXT:    kmovw %k1, %ebx
-; KNL_X32-NEXT:    kshiftrw $3, %k0, %k1
+; KNL_X32-NEXT:    kshiftrw $5, %k0, %k1
 ; KNL_X32-NEXT:    andb $1, %bl
 ; KNL_X32-NEXT:    shlb $2, %bl
-; KNL_X32-NEXT:    orb %dl, %bl
-; KNL_X32-NEXT:    kmovw %k1, %edx
+; KNL_X32-NEXT:    andb $1, %al
+; KNL_X32-NEXT:    shlb $3, %al
+; KNL_X32-NEXT:    addb %bl, %al
+; KNL_X32-NEXT:    kmovw %k1, %ebx
 ; KNL_X32-NEXT:    kshiftrw $4, %k0, %k0
+; KNL_X32-NEXT:    addb %dl, %al
+; KNL_X32-NEXT:    kmovw %k0, %edx
 ; KNL_X32-NEXT:    andb $1, %dl
-; KNL_X32-NEXT:    shlb $3, %dl
-; KNL_X32-NEXT:    orb %bl, %dl
-; KNL_X32-NEXT:    kmovw %k0, %ebx
+; KNL_X32-NEXT:    shlb $4, %dl
 ; KNL_X32-NEXT:    andb $1, %bl
-; KNL_X32-NEXT:    shlb $4, %bl
-; KNL_X32-NEXT:    orb %dl, %bl
-; KNL_X32-NEXT:    andb $1, %al
-; KNL_X32-NEXT:    shlb $5, %al
-; KNL_X32-NEXT:    orb %bl, %al
+; KNL_X32-NEXT:    shlb $5, %bl
+; KNL_X32-NEXT:    addb %dl, %bl
+; KNL_X32-NEXT:    addb %al, %bl
 ; KNL_X32-NEXT:    shlb $6, %cl
-; KNL_X32-NEXT:    orb %al, %cl
+; KNL_X32-NEXT:    orb %bl, %cl
 ; KNL_X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; KNL_X32-NEXT:    andb $127, %cl
 ; KNL_X32-NEXT:    movb %cl, (%eax)
@@ -3499,19 +3499,19 @@ define <7 x i1> @test17(<7 x i1> %a, <7 x i1> %b, <7 x i1> %c, <7 x i1> %d, <7 x
 ; FASTISEL-NEXT:    andb $1, %r10b
 ; FASTISEL-NEXT:    andb $1, %r9b
 ; FASTISEL-NEXT:    addb %r9b, %r9b
-; FASTISEL-NEXT:    orb %r10b, %r9b
+; FASTISEL-NEXT:    addb %r10b, %r9b
 ; FASTISEL-NEXT:    andb $1, %r8b
 ; FASTISEL-NEXT:    shlb $2, %r8b
-; FASTISEL-NEXT:    orb %r9b, %r8b
 ; FASTISEL-NEXT:    andb $1, %dil
 ; FASTISEL-NEXT:    shlb $3, %dil
-; FASTISEL-NEXT:    orb %r8b, %dil
+; FASTISEL-NEXT:    addb %r8b, %dil
+; FASTISEL-NEXT:    addb %r9b, %dil
 ; FASTISEL-NEXT:    andb $1, %sil
 ; FASTISEL-NEXT:    shlb $4, %sil
-; FASTISEL-NEXT:    orb %dil, %sil
 ; FASTISEL-NEXT:    andb $1, %dl
 ; FASTISEL-NEXT:    shlb $5, %dl
-; FASTISEL-NEXT:    orb %sil, %dl
+; FASTISEL-NEXT:    addb %sil, %dl
+; FASTISEL-NEXT:    addb %dil, %dl
 ; FASTISEL-NEXT:    shlb $6, %cl
 ; FASTISEL-NEXT:    orb %dl, %cl
 ; FASTISEL-NEXT:    andb $127, %cl
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 3e40bfa1e791d0..16ec98e120f843 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -855,7 +855,7 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
 ; KNL-NEXT:    kshiftrw $11, %k1, %k1
 ; KNL-NEXT:    korw %k1, %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    orl %ecx, %eax
+; KNL-NEXT:    addl %ecx, %eax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
@@ -1666,7 +1666,7 @@ define i32 @test_insertelement_variable_v32i1(<32 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %ecx, %eax
+; KNL-NEXT:    addl %ecx, %eax
 ; KNL-NEXT:    movq %rbp, %rsp
 ; KNL-NEXT:    popq %rbp
 ; KNL-NEXT:    vzeroupper
@@ -1720,7 +1720,7 @@ define i64 @test_insertelement_variable_v64i1(<64 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %ecx
 ; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    addl %eax, %ecx
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1730,9 +1730,9 @@ define i64 @test_insertelement_variable_v64i1(<64 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %edx, %eax
+; KNL-NEXT:    addl %edx, %eax
 ; KNL-NEXT:    shlq $32, %rax
-; KNL-NEXT:    orq %rcx, %rax
+; KNL-NEXT:    addq %rcx, %rax
 ; KNL-NEXT:    movq %rbp, %rsp
 ; KNL-NEXT:    popq %rbp
 ; KNL-NEXT:    vzeroupper
@@ -1888,7 +1888,7 @@ define i96 @test_insertelement_variable_v96i1(<96 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %ecx
 ; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    addl %eax, %ecx
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1898,9 +1898,9 @@ define i96 @test_insertelement_variable_v96i1(<96 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %edx, %eax
+; KNL-NEXT:    addl %edx, %eax
 ; KNL-NEXT:    shlq $32, %rax
-; KNL-NEXT:    orq %rcx, %rax
+; KNL-NEXT:    addq %rcx, %rax
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1910,7 +1910,7 @@ define i96 @test_insertelement_variable_v96i1(<96 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %esi
 ; KNL-NEXT:    shll $16, %esi
-; KNL-NEXT:    orl %ecx, %esi
+; KNL-NEXT:    addl %ecx, %esi
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -1920,9 +1920,9 @@ define i96 @test_insertelement_variable_v96i1(<96 x i8> %a, i8 %b, i32 %index) n
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %edx
 ; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %ecx, %edx
+; KNL-NEXT:    addl %ecx, %edx
 ; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rsi, %rdx
+; KNL-NEXT:    addq %rsi, %rdx
 ; KNL-NEXT:    movq %rbp, %rsp
 ; KNL-NEXT:    popq %rbp
 ; KNL-NEXT:    vzeroupper
@@ -2094,7 +2094,7 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %ecx
 ; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    addl %eax, %ecx
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -2104,9 +2104,9 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %edx, %eax
+; KNL-NEXT:    addl %edx, %eax
 ; KNL-NEXT:    shlq $32, %rax
-; KNL-NEXT:    orq %rcx, %rax
+; KNL-NEXT:    addq %rcx, %rax
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -2116,7 +2116,7 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %esi
 ; KNL-NEXT:    shll $16, %esi
-; KNL-NEXT:    orl %ecx, %esi
+; KNL-NEXT:    addl %ecx, %esi
 ; KNL-NEXT:    vpmovsxbd {{[0-9]+}}(%rsp), %zmm0
 ; KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
@@ -2126,9 +2126,9 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; KNL-NEXT:    kmovw %k0, %edx
 ; KNL-NEXT:    shll $16, %edx
-; KNL-NEXT:    orl %ecx, %edx
+; KNL-NEXT:    addl %ecx, %edx
 ; KNL-NEXT:    shlq $32, %rdx
-; KNL-NEXT:    orq %rsi, %rdx
+; KNL-NEXT:    addq %rsi, %rdx
 ; KNL-NEXT:    movq %rbp, %rsp
 ; KNL-NEXT:    popq %rbp
 ; KNL-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index 86ebb1e40870f8..5f32ffd10424eb 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -216,7 +216,7 @@ define i32 @test12_v32i32(<32 x i32> %a, <32 x i32> %b) nounwind {
 ; KNL-NEXT:    vpcmpeqd %zmm3, %zmm1, %k0 ## encoding: [0x62,0xf1,0x75,0x48,0x76,0xc3]
 ; KNL-NEXT:    kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; KNL-NEXT:    shll $16, %eax ## encoding: [0xc1,0xe0,0x10]
-; KNL-NEXT:    orl %ecx, %eax ## encoding: [0x09,0xc8]
+; KNL-NEXT:    addl %ecx, %eax ## encoding: [0x01,0xc8]
 ; KNL-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; KNL-NEXT:    retq ## encoding: [0xc3]
 ;
@@ -256,7 +256,7 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x48,0x27,0xc0]
 ; KNL-NEXT:    kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
 ; KNL-NEXT:    shll $16, %ecx ## encoding: [0xc1,0xe1,0x10]
-; KNL-NEXT:    orl %eax, %ecx ## encoding: [0x09,0xc1]
+; KNL-NEXT:    addl %eax, %ecx ## encoding: [0x01,0xc1]
 ; KNL-NEXT:    vpcmpeqw %ymm3, %ymm1, %ymm0 ## encoding: [0xc5,0xf5,0x75,0xc3]
 ; KNL-NEXT:    vpmovsxwd %ymm0, %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x23,0xc0]
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x48,0x27,0xc0]
@@ -268,9 +268,9 @@ define i64 @test12_v64i16(<64 x i16> %a, <64 x i16> %b) nounwind {
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 ## encoding: [0x62,0xf2,0x7d,0x48,0x27,0xc0]
 ; KNL-NEXT:    kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
 ; KNL-NEXT:    shll $16, %eax ## encoding: [0xc1,0xe0,0x10]
-; KNL-NEXT:    orl %edx, %eax ## encoding: [0x09,0xd0]
+; KNL-NEXT:    addl %edx, %eax ## encoding: [0x01,0xd0]
 ; KNL-NEXT:    shlq $32, %rax ## encoding: [0x48,0xc1,0xe0,0x20]
-; KNL-NEXT:    orq %rcx, %rax ## encoding: [0x48,0x09,0xc8]
+; KNL-NEXT:    addq %rcx, %rax ## encoding: [0x48,0x01,0xc8]
 ; KNL-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; KNL-NEXT:    retq ## encoding: [0xc3]
 ;
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
index bed8d5fcb18697..07f7e0e064e3a5 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -235,7 +235,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -266,7 +266,7 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, ptr %__b)
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -303,7 +303,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -341,7 +341,7 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -929,7 +929,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -961,7 +961,7 @@ define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, ptr %__b)
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -1000,7 +1000,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -1039,7 +1039,7 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5064,7 +5064,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5095,7 +5095,7 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5132,7 +5132,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5170,7 +5170,7 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5758,7 +5758,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5790,7 +5790,7 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5829,7 +5829,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -5868,7 +5868,7 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -9875,7 +9875,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -9908,7 +9908,7 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -9946,7 +9946,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -9986,7 +9986,7 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -10606,7 +10606,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -10642,7 +10642,7 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -10683,7 +10683,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -10726,7 +10726,7 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -14738,7 +14738,7 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -14771,7 +14771,7 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -14810,7 +14810,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -14850,7 +14850,7 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -15482,7 +15482,7 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -15518,7 +15518,7 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, ptr %__b
 ; NoVLX-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; NoVLX-NEXT:    kmovw %k0, %eax
 ; NoVLX-NEXT:    shll $16, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -15561,7 +15561,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
@@ -15604,7 +15604,7 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u,
 ; NoVLX-NEXT:    andl %edi, %ecx
 ; NoVLX-NEXT:    shll $16, %ecx
 ; NoVLX-NEXT:    movzwl %ax, %eax
-; NoVLX-NEXT:    orl %ecx, %eax
+; NoVLX-NEXT:    addl %ecx, %eax
 ; NoVLX-NEXT:    vzeroupper
 ; NoVLX-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index f2d3c4fb34199e..bf2b5f1acdb4b1 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -511,7 +511,7 @@ define void @fold_ext_trunc(ptr %pa, ptr %pc) nounwind {
 define bfloat @fold_ext_trunc2(bfloat %a) nounwind {
 ; X86-LABEL: fold_ext_trunc2:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    retl
 ;
 ; CHECK-LABEL: fold_ext_trunc2:
@@ -695,9 +695,9 @@ define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %ebx
 ; SSE2-NEXT:    shll $16, %ebx
-; SSE2-NEXT:    orl %r14d, %ebx
+; SSE2-NEXT:    addl %r14d, %ebx
 ; SSE2-NEXT:    shlq $32, %rbx
-; SSE2-NEXT:    orq %r15, %rbx
+; SSE2-NEXT:    addq %r15, %rbx
 ; SSE2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
 ; SSE2-NEXT:    movl %r15d, %eax
 ; SSE2-NEXT:    shll $16, %eax
@@ -720,7 +720,7 @@ define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %r14d
 ; SSE2-NEXT:    shll $16, %r14d
-; SSE2-NEXT:    orl %ebp, %r14d
+; SSE2-NEXT:    addl %ebp, %r14d
 ; SSE2-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; SSE2-NEXT:    shll $16, %eax
 ; SSE2-NEXT:    movd %eax, %xmm1
@@ -741,9 +741,9 @@ define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ebp, %eax
+; SSE2-NEXT:    addl %ebp, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movq %rbx, %xmm1
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -934,8 +934,8 @@ define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 define <2 x bfloat> @pr62997(bfloat %a, bfloat %b) {
 ; X86-LABEL: pr62997:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm0
-; X86-NEXT:    vmovsh {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT:    vmovsh {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero
 ; X86-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X86-NEXT:    retl
 ;
@@ -1348,7 +1348,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1359,9 +1359,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
@@ -1374,7 +1374,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1385,9 +1385,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1402,7 +1402,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1413,9 +1413,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
@@ -1428,7 +1428,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1439,9 +1439,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1456,7 +1456,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1467,9 +1467,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
@@ -1482,7 +1482,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1493,9 +1493,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1510,7 +1510,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1521,9 +1521,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
@@ -1536,7 +1536,7 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; SSE2-NEXT:    # xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -1547,9 +1547,9 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebx, %eax
+; SSE2-NEXT:    addl %ebx, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm0
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
@@ -1833,8 +1833,8 @@ define <32 x bfloat> @pr62997_3(<32 x bfloat> %0, bfloat %1) {
 ; SSE2-NEXT:    movzwl %ax, %eax
 ; SSE2-NEXT:    pextrw $0, %xmm4, %edx
 ; SSE2-NEXT:    shll $16, %edx
-; SSE2-NEXT:    orl %eax, %edx
-; SSE2-NEXT:    orq %rcx, %rdx
+; SSE2-NEXT:    addl %eax, %edx
+; SSE2-NEXT:    addq %rcx, %rdx
 ; SSE2-NEXT:    movq %rdx, %xmm4
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
 ; SSE2-NEXT:    retq
@@ -2230,7 +2230,7 @@ define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2241,9 +2241,9 @@ define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %ebx
-; SSE2-NEXT:    orl %ebp, %ebx
+; SSE2-NEXT:    addl %ebp, %ebx
 ; SSE2-NEXT:    shlq $32, %rbx
-; SSE2-NEXT:    orq %r14, %rbx
+; SSE2-NEXT:    addq %r14, %rbx
 ; SSE2-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2253,7 +2253,7 @@ define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebp, %r14d
+; SSE2-NEXT:    addl %ebp, %r14d
 ; SSE2-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2264,9 +2264,9 @@ define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebp, %eax
+; SSE2-NEXT:    addl %ebp, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm1
 ; SSE2-NEXT:    movq %rbx, %xmm0
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2318,7 +2318,7 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2329,9 +2329,9 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %ebx
-; SSE2-NEXT:    orl %ebp, %ebx
+; SSE2-NEXT:    addl %ebp, %ebx
 ; SSE2-NEXT:    shlq $32, %rbx
-; SSE2-NEXT:    orq %r14, %rbx
+; SSE2-NEXT:    addq %r14, %rbx
 ; SSE2-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2341,7 +2341,7 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r15d
-; SSE2-NEXT:    orl %ebp, %r15d
+; SSE2-NEXT:    addl %ebp, %r15d
 ; SSE2-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2352,9 +2352,9 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebp, %r14d
+; SSE2-NEXT:    addl %ebp, %r14d
 ; SSE2-NEXT:    shlq $32, %r14
-; SSE2-NEXT:    orq %r15, %r14
+; SSE2-NEXT:    addq %r15, %r14
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2364,7 +2364,7 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r12d
-; SSE2-NEXT:    orl %ebp, %r12d
+; SSE2-NEXT:    addl %ebp, %r12d
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2375,9 +2375,9 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r15d
-; SSE2-NEXT:    orl %ebp, %r15d
+; SSE2-NEXT:    addl %ebp, %r15d
 ; SSE2-NEXT:    shlq $32, %r15
-; SSE2-NEXT:    orq %r12, %r15
+; SSE2-NEXT:    addq %r12, %r15
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2387,7 +2387,7 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r12d
-; SSE2-NEXT:    orl %ebp, %r12d
+; SSE2-NEXT:    addl %ebp, %r12d
 ; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
@@ -2398,9 +2398,9 @@ define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind {
 ; SSE2-NEXT:    callq __truncsfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebp, %eax
+; SSE2-NEXT:    addl %ebp, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r12, %rax
+; SSE2-NEXT:    addq %r12, %rax
 ; SSE2-NEXT:    movq %rax, %xmm1
 ; SSE2-NEXT:    movq %r15, %xmm0
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2512,7 +2512,7 @@ define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind {
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebx, %r14d
+; SSE2-NEXT:    addl %ebx, %r14d
 ; SSE2-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
@@ -2522,9 +2522,9 @@ define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind {
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %ebx
-; SSE2-NEXT:    orl %ebp, %ebx
+; SSE2-NEXT:    addl %ebp, %ebx
 ; SSE2-NEXT:    shlq $32, %rbx
-; SSE2-NEXT:    orq %r14, %rbx
+; SSE2-NEXT:    addq %r14, %rbx
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
@@ -2534,7 +2534,7 @@ define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind {
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %r14d
-; SSE2-NEXT:    orl %ebp, %r14d
+; SSE2-NEXT:    addl %ebp, %r14d
 ; SSE2-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
@@ -2544,9 +2544,9 @@ define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind {
 ; SSE2-NEXT:    callq __truncdfbf2@PLT
 ; SSE2-NEXT:    pextrw $0, %xmm0, %eax
 ; SSE2-NEXT:    movzwl %ax, %eax
-; SSE2-NEXT:    orl %ebp, %eax
+; SSE2-NEXT:    addl %ebp, %eax
 ; SSE2-NEXT:    shlq $32, %rax
-; SSE2-NEXT:    orq %r14, %rax
+; SSE2-NEXT:    addq %r14, %rax
 ; SSE2-NEXT:    movq %rax, %xmm1
 ; SSE2-NEXT:    movq %rbx, %xmm0
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -2757,23 +2757,23 @@ define <8 x bfloat> @extract_v32bf16_v8bf16(<32 x bfloat> %x) {
 ; SSE2-NEXT:    pextrw $0, %xmm1, %eax
 ; SSE2-NEXT:    pextrw $1, %xmm1, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    pextrw $2, %xmm1, %eax
 ; SSE2-NEXT:    pextrw $3, %xmm1, %edx
 ; SSE2-NEXT:    shll $16, %edx
-; SSE2-NEXT:    orl %eax, %edx
+; SSE2-NEXT:    addl %eax, %edx
 ; SSE2-NEXT:    shlq $32, %rdx
-; SSE2-NEXT:    orq %rcx, %rdx
+; SSE2-NEXT:    addq %rcx, %rdx
 ; SSE2-NEXT:    pextrw $4, %xmm1, %eax
 ; SSE2-NEXT:    pextrw $5, %xmm1, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    pextrw $6, %xmm1, %eax
 ; SSE2-NEXT:    pextrw $7, %xmm1, %esi
 ; SSE2-NEXT:    shll $16, %esi
-; SSE2-NEXT:    orl %eax, %esi
+; SSE2-NEXT:    addl %eax, %esi
 ; SSE2-NEXT:    shlq $32, %rsi
-; SSE2-NEXT:    orq %rcx, %rsi
+; SSE2-NEXT:    addq %rcx, %rsi
 ; SSE2-NEXT:    movq %rsi, %xmm1
 ; SSE2-NEXT:    movq %rdx, %xmm0
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 34ef23db345755..4d5e621494aa4f 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -812,7 +812,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm4, %ecx
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm5, %eax
 ; SSE2-SSSE3-NEXT:    shll $16, %eax
-; SSE2-SSSE3-NEXT:    orl %ecx, %eax
+; SSE2-SSSE3-NEXT:    addl %ecx, %eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i8:
@@ -830,7 +830,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
index 6acce84645e889..1a5ef5e6e32870 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -213,7 +213,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 ; SSE-NEXT:    pmovmskb %xmm11, %ecx
 ; SSE-NEXT:    pmovmskb %xmm9, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %ecx, %eax
+; SSE-NEXT:    addl %ecx, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i16:
@@ -243,7 +243,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -280,7 +280,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    shll $16, %eax
-; AVX512F-NEXT:    orl %ecx, %eax
+; AVX512F-NEXT:    addl %ecx, %eax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -510,13 +510,13 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
 ; SSE-NEXT:    pmovmskb %xmm11, %eax
 ; SSE-NEXT:    pmovmskb %xmm10, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    pmovmskb %xmm9, %edx
 ; SSE-NEXT:    pmovmskb %xmm8, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %edx, %eax
+; SSE-NEXT:    addl %edx, %eax
 ; SSE-NEXT:    shlq $32, %rax
-; SSE-NEXT:    orq %rcx, %rax
+; SSE-NEXT:    addq %rcx, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v64i8:
@@ -544,13 +544,13 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    vpmovmskb %xmm3, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %edx
 ; AVX1-NEXT:    vpmovmskb %xmm2, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %edx, %eax
+; AVX1-NEXT:    addl %edx, %eax
 ; AVX1-NEXT:    shlq $32, %rax
-; AVX1-NEXT:    orq %rcx, %rax
+; AVX1-NEXT:    addq %rcx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -565,7 +565,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX2-NEXT:    vpmovmskb %ymm1, %eax
 ; AVX2-NEXT:    shlq $32, %rax
-; AVX2-NEXT:    orq %rcx, %rax
+; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -584,7 +584,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
 ; AVX512F-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX512F-NEXT:    vpmovmskb %ymm1, %eax
 ; AVX512F-NEXT:    shlq $32, %rax
-; AVX512F-NEXT:    orq %rcx, %rax
+; AVX512F-NEXT:    addq %rcx, %rax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
index f21c3f7043e69b..7085b136c3ae27 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -498,7 +498,7 @@ define i64 @v16i8_widened_with_ones(<16 x i8> %a, <16 x i8> %b) {
 ; SSE2-SSSE3:       # %bb.0: # %entry
 ; SSE2-SSSE3-NEXT:    pcmpeqb %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %ecx
-; SSE2-SSSE3-NEXT:    orl $-65536, %ecx # imm = 0xFFFF0000
+; SSE2-SSSE3-NEXT:    addl $-65536, %ecx # imm = 0xFFFF0000
 ; SSE2-SSSE3-NEXT:    movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
 ; SSE2-SSSE3-NEXT:    orq %rcx, %rax
 ; SSE2-SSSE3-NEXT:    retq
@@ -507,7 +507,7 @@ define i64 @v16i8_widened_with_ones(<16 x i8> %a, <16 x i8> %b) {
 ; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX1-NEXT:    orl $-65536, %ecx # imm = 0xFFFF0000
+; AVX1-NEXT:    addl $-65536, %ecx # imm = 0xFFFF0000
 ; AVX1-NEXT:    movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
 ; AVX1-NEXT:    orq %rcx, %rax
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
index 656f1e031a7cbe..dfea5dcc6a25ba 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -158,7 +158,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
 ; SSE2-SSSE3-NEXT:    pcmpgtb %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm1, %eax
 ; SSE2-SSSE3-NEXT:    shll $16, %eax
-; SSE2-SSSE3-NEXT:    orl %ecx, %eax
+; SSE2-SSSE3-NEXT:    addl %ecx, %eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i8:
@@ -170,7 +170,7 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
 ; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -307,7 +307,7 @@ define void @bitcast_32i8_store(ptr %p, <32 x i8> %a0) {
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE2-SSSE3-NEXT:    shll $16, %ecx
-; SSE2-SSSE3-NEXT:    orl %eax, %ecx
+; SSE2-SSSE3-NEXT:    addl %eax, %ecx
 ; SSE2-SSSE3-NEXT:    movl %ecx, (%rdi)
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -317,7 +317,7 @@ define void @bitcast_32i8_store(ptr %p, <32 x i8> %a0) {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    movl %ecx, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
index 58bf0b607eb522..5f5716a9085332 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -17,7 +17,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %ecx, %eax
+; SSE-NEXT:    addl %ecx, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i16:
@@ -35,7 +35,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -62,7 +62,7 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) {
 ; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    shll $16, %eax
-; AVX512F-NEXT:    orl %ecx, %eax
+; AVX512F-NEXT:    addl %ecx, %eax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -211,15 +211,15 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; SSE-NEXT:    pcmpgtb %xmm5, %xmm1
 ; SSE-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    pcmpgtb %xmm6, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %edx
 ; SSE-NEXT:    pcmpgtb %xmm7, %xmm3
 ; SSE-NEXT:    pmovmskb %xmm3, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %edx, %eax
+; SSE-NEXT:    addl %edx, %eax
 ; SSE-NEXT:    shlq $32, %rax
-; SSE-NEXT:    orq %rcx, %rax
+; SSE-NEXT:    addq %rcx, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v64i8:
@@ -231,7 +231,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
@@ -239,9 +239,9 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %edx, %eax
+; AVX1-NEXT:    addl %edx, %eax
 ; AVX1-NEXT:    shlq $32, %rax
-; AVX1-NEXT:    orq %rcx, %rax
+; AVX1-NEXT:    addq %rcx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -252,7 +252,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; AVX2-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX2-NEXT:    shlq $32, %rax
-; AVX2-NEXT:    orq %rcx, %rax
+; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
@@ -265,7 +265,7 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
 ; AVX512F-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX512F-NEXT:    vpmovmskb %ymm2, %eax
 ; AVX512F-NEXT:    shlq $32, %rax
-; AVX512F-NEXT:    orq %rcx, %rax
+; AVX512F-NEXT:    addq %rcx, %rax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -410,13 +410,13 @@ define void @bitcast_64i8_store(ptr %p, <64 x i8> %a0) {
 ; SSE-NEXT:    pmovmskb %xmm0, %eax
 ; SSE-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    pmovmskb %xmm3, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %eax, %edx
+; SSE-NEXT:    addl %eax, %edx
 ; SSE-NEXT:    shlq $32, %rdx
-; SSE-NEXT:    orq %rcx, %rdx
+; SSE-NEXT:    addq %rcx, %rdx
 ; SSE-NEXT:    movq %rdx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -426,14 +426,14 @@ define void @bitcast_64i8_store(ptr %p, <64 x i8> %a0) {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %eax, %edx
+; AVX1-NEXT:    addl %eax, %edx
 ; AVX1-NEXT:    shlq $32, %rdx
-; AVX1-NEXT:    orq %rcx, %rdx
+; AVX1-NEXT:    addq %rcx, %rdx
 ; AVX1-NEXT:    movq %rdx, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -443,7 +443,7 @@ define void @bitcast_64i8_store(ptr %p, <64 x i8> %a0) {
 ; AVX2-NEXT:    vpmovmskb %ymm1, %eax
 ; AVX2-NEXT:    shlq $32, %rax
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
-; AVX2-NEXT:    orq %rax, %rcx
+; AVX2-NEXT:    addq %rax, %rcx
 ; AVX2-NEXT:    movq %rcx, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -491,7 +491,7 @@ define void @bitcast_32i16_store(ptr %p, <32 x i16> %a0) {
 ; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    movl %ecx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -504,7 +504,7 @@ define void @bitcast_32i16_store(ptr %p, <32 x i16> %a0) {
 ; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    movl %ecx, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 501e73c46af9cf..ba96ea16f439a5 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -985,13 +985,13 @@ define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm0, %eax
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE2-SSSE3-NEXT:    shll $16, %ecx
-; SSE2-SSSE3-NEXT:    orl %eax, %ecx
+; SSE2-SSSE3-NEXT:    addl %eax, %ecx
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm2, %eax
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm3, %edx
 ; SSE2-SSSE3-NEXT:    shll $16, %edx
-; SSE2-SSSE3-NEXT:    orl %eax, %edx
+; SSE2-SSSE3-NEXT:    addl %eax, %edx
 ; SSE2-SSSE3-NEXT:    shlq $32, %rdx
-; SSE2-SSSE3-NEXT:    orq %rcx, %rdx
+; SSE2-SSSE3-NEXT:    addq %rcx, %rdx
 ; SSE2-SSSE3-NEXT:    movq %rdx, %xmm0
 ; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
@@ -1003,11 +1003,11 @@ define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
 ; SSE41-NEXT:    pmovmskb %xmm2, %eax
 ; SSE41-NEXT:    pmovmskb %xmm3, %ecx
 ; SSE41-NEXT:    shll $16, %ecx
-; SSE41-NEXT:    orl %eax, %ecx
+; SSE41-NEXT:    addl %eax, %ecx
 ; SSE41-NEXT:    pmovmskb %xmm0, %edx
 ; SSE41-NEXT:    pmovmskb %xmm1, %eax
 ; SSE41-NEXT:    shll $16, %eax
-; SSE41-NEXT:    orl %edx, %eax
+; SSE41-NEXT:    addl %edx, %eax
 ; SSE41-NEXT:    addl %ecx, %eax
 ; SSE41-NEXT:    retq
 ;
@@ -1017,12 +1017,12 @@ define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
 ; AVX1-NEXT:    vpmovmskb %xmm1, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %edx, %eax
+; AVX1-NEXT:    addl %edx, %eax
 ; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1109,23 +1109,23 @@ define i64 @bitcast_v128i8_to_v2i64(<128 x i8> %a0) nounwind {
 ; SSE-NEXT:    pmovmskb %xmm4, %eax
 ; SSE-NEXT:    pmovmskb %xmm5, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    pmovmskb %xmm6, %eax
 ; SSE-NEXT:    pmovmskb %xmm7, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %eax, %edx
+; SSE-NEXT:    addl %eax, %edx
 ; SSE-NEXT:    shlq $32, %rdx
-; SSE-NEXT:    orq %rcx, %rdx
+; SSE-NEXT:    addq %rcx, %rdx
 ; SSE-NEXT:    pmovmskb %xmm0, %eax
 ; SSE-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    pmovmskb %xmm2, %esi
 ; SSE-NEXT:    pmovmskb %xmm3, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %esi, %eax
+; SSE-NEXT:    addl %esi, %eax
 ; SSE-NEXT:    shlq $32, %rax
-; SSE-NEXT:    orq %rcx, %rax
+; SSE-NEXT:    addq %rcx, %rax
 ; SSE-NEXT:    addq %rdx, %rax
 ; SSE-NEXT:    retq
 ;
@@ -1135,26 +1135,26 @@ define i64 @bitcast_v128i8_to_v2i64(<128 x i8> %a0) nounwind {
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-NEXT:    vpmovmskb %xmm2, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %eax, %edx
+; AVX1-NEXT:    addl %eax, %edx
 ; AVX1-NEXT:    vpmovmskb %xmm3, %eax
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
 ; AVX1-NEXT:    vpmovmskb %xmm2, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    shlq $32, %rcx
-; AVX1-NEXT:    orq %rdx, %rcx
+; AVX1-NEXT:    addq %rdx, %rcx
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %eax, %edx
+; AVX1-NEXT:    addl %eax, %edx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %esi
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %esi, %eax
+; AVX1-NEXT:    addl %esi, %eax
 ; AVX1-NEXT:    shlq $32, %rax
-; AVX1-NEXT:    orq %rdx, %rax
+; AVX1-NEXT:    addq %rdx, %rax
 ; AVX1-NEXT:    addq %rcx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1164,11 +1164,11 @@ define i64 @bitcast_v128i8_to_v2i64(<128 x i8> %a0) nounwind {
 ; AVX2-NEXT:    vpmovmskb %ymm3, %eax
 ; AVX2-NEXT:    shlq $32, %rax
 ; AVX2-NEXT:    vpmovmskb %ymm2, %ecx
-; AVX2-NEXT:    orq %rax, %rcx
+; AVX2-NEXT:    addq %rax, %rcx
 ; AVX2-NEXT:    vpmovmskb %ymm1, %edx
 ; AVX2-NEXT:    shlq $32, %rdx
 ; AVX2-NEXT:    vpmovmskb %ymm0, %eax
-; AVX2-NEXT:    orq %rdx, %rax
+; AVX2-NEXT:    addq %rdx, %rax
 ; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1198,29 +1198,29 @@ define i1 @trunc_v128i8_cmp(<128 x i8> %a0) nounwind {
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm1
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE2-SSSE3-NEXT:    shll $16, %ecx
-; SSE2-SSSE3-NEXT:    orl %eax, %ecx
+; SSE2-SSSE3-NEXT:    addl %eax, %ecx
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm2
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm2, %edx
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm3
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm3, %eax
 ; SSE2-SSSE3-NEXT:    shll $16, %eax
-; SSE2-SSSE3-NEXT:    orl %edx, %eax
+; SSE2-SSSE3-NEXT:    addl %edx, %eax
 ; SSE2-SSSE3-NEXT:    shlq $32, %rax
-; SSE2-SSSE3-NEXT:    orq %rcx, %rax
+; SSE2-SSSE3-NEXT:    addq %rcx, %rax
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm4
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm4, %ecx
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm5
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm5, %edx
 ; SSE2-SSSE3-NEXT:    shll $16, %edx
-; SSE2-SSSE3-NEXT:    orl %ecx, %edx
+; SSE2-SSSE3-NEXT:    addl %ecx, %edx
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm6
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm6, %ecx
 ; SSE2-SSSE3-NEXT:    psllw $7, %xmm7
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm7, %esi
 ; SSE2-SSSE3-NEXT:    shll $16, %esi
-; SSE2-SSSE3-NEXT:    orl %ecx, %esi
+; SSE2-SSSE3-NEXT:    addl %ecx, %esi
 ; SSE2-SSSE3-NEXT:    shlq $32, %rsi
-; SSE2-SSSE3-NEXT:    orq %rdx, %rsi
+; SSE2-SSSE3-NEXT:    addq %rdx, %rsi
 ; SSE2-SSSE3-NEXT:    movq %rsi, %xmm0
 ; SSE2-SSSE3-NEXT:    movq %rax, %xmm1
 ; SSE2-SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1238,29 +1238,29 @@ define i1 @trunc_v128i8_cmp(<128 x i8> %a0) nounwind {
 ; SSE41-NEXT:    psllw $7, %xmm1
 ; SSE41-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE41-NEXT:    shll $16, %ecx
-; SSE41-NEXT:    orl %eax, %ecx
+; SSE41-NEXT:    addl %eax, %ecx
 ; SSE41-NEXT:    psllw $7, %xmm2
 ; SSE41-NEXT:    pmovmskb %xmm2, %edx
 ; SSE41-NEXT:    psllw $7, %xmm3
 ; SSE41-NEXT:    pmovmskb %xmm3, %eax
 ; SSE41-NEXT:    shll $16, %eax
-; SSE41-NEXT:    orl %edx, %eax
+; SSE41-NEXT:    addl %edx, %eax
 ; SSE41-NEXT:    shlq $32, %rax
-; SSE41-NEXT:    orq %rcx, %rax
+; SSE41-NEXT:    addq %rcx, %rax
 ; SSE41-NEXT:    psllw $7, %xmm4
 ; SSE41-NEXT:    pmovmskb %xmm4, %ecx
 ; SSE41-NEXT:    psllw $7, %xmm5
 ; SSE41-NEXT:    pmovmskb %xmm5, %edx
 ; SSE41-NEXT:    shll $16, %edx
-; SSE41-NEXT:    orl %ecx, %edx
+; SSE41-NEXT:    addl %ecx, %edx
 ; SSE41-NEXT:    psllw $7, %xmm6
 ; SSE41-NEXT:    pmovmskb %xmm6, %ecx
 ; SSE41-NEXT:    psllw $7, %xmm7
 ; SSE41-NEXT:    pmovmskb %xmm7, %esi
 ; SSE41-NEXT:    shll $16, %esi
-; SSE41-NEXT:    orl %ecx, %esi
+; SSE41-NEXT:    addl %ecx, %esi
 ; SSE41-NEXT:    shlq $32, %rsi
-; SSE41-NEXT:    orq %rdx, %rsi
+; SSE41-NEXT:    addq %rdx, %rsi
 ; SSE41-NEXT:    movq %rsi, %xmm0
 ; SSE41-NEXT:    movq %rax, %xmm1
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1277,32 +1277,32 @@ define i1 @trunc_v128i8_cmp(<128 x i8> %a0) nounwind {
 ; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    vpsllw $7, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
 ; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %edx, %eax
+; AVX1-NEXT:    addl %edx, %eax
 ; AVX1-NEXT:    shlq $32, %rax
-; AVX1-NEXT:    orq %rcx, %rax
+; AVX1-NEXT:    addq %rcx, %rax
 ; AVX1-NEXT:    vpsllw $7, %xmm2, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm0
 ; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %ecx, %edx
+; AVX1-NEXT:    addl %ecx, %edx
 ; AVX1-NEXT:    vpsllw $7, %xmm3, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
 ; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %esi
 ; AVX1-NEXT:    shll $16, %esi
-; AVX1-NEXT:    orl %ecx, %esi
+; AVX1-NEXT:    addl %ecx, %esi
 ; AVX1-NEXT:    shlq $32, %rsi
-; AVX1-NEXT:    orq %rdx, %rsi
+; AVX1-NEXT:    addq %rdx, %rsi
 ; AVX1-NEXT:    vmovq %rsi, %xmm0
 ; AVX1-NEXT:    vmovq %rax, %xmm1
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
@@ -1319,13 +1319,13 @@ define i1 @trunc_v128i8_cmp(<128 x i8> %a0) nounwind {
 ; AVX2-NEXT:    shlq $32, %rax
 ; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
-; AVX2-NEXT:    orq %rax, %rcx
+; AVX2-NEXT:    addq %rax, %rcx
 ; AVX2-NEXT:    vpsllw $7, %ymm3, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX2-NEXT:    shlq $32, %rax
 ; AVX2-NEXT:    vpsllw $7, %ymm2, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %edx
-; AVX2-NEXT:    orq %rax, %rdx
+; AVX2-NEXT:    addq %rax, %rdx
 ; AVX2-NEXT:    vmovq %rdx, %xmm0
 ; AVX2-NEXT:    vmovq %rcx, %xmm1
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
index 26b1d64874e590..416b8876b8f06e 100644
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -20,7 +20,7 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
 ; X86-NEXT:    shll $4, %edx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $3855, %eax # imm = 0xF0F
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    movl %eax, %edx
 ; X86-NEXT:    andl $13107, %edx # imm = 0x3333
 ; X86-NEXT:    shrl $2, %eax
@@ -37,7 +37,7 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
 ; X86-NEXT:    shll $4, %edx
 ; X86-NEXT:    shrl $4, %ecx
 ; X86-NEXT:    andl $3855, %ecx # imm = 0xF0F
-; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    addl %edx, %ecx
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    andl $13107, %edx # imm = 0x3333
 ; X86-NEXT:    shrl $2, %ecx
@@ -108,7 +108,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %edx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    movl %eax, %edx
 ; X86-NEXT:    andl $858993459, %edx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -125,7 +125,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %edx
 ; X86-NEXT:    shrl $4, %ecx
 ; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
-; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    addl %edx, %ecx
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    andl $858993459, %edx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %ecx
@@ -147,7 +147,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; X64-NEXT:    andq %rcx, %rax
 ; X64-NEXT:    andq %rcx, %rdi
 ; X64-NEXT:    shlq $4, %rdi
-; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    addq %rax, %rdi
 ; X64-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; X64-NEXT:    movq %rdi, %rcx
 ; X64-NEXT:    andq %rax, %rcx
@@ -179,7 +179,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; GFNI-NEXT:    andq %rcx, %rax
 ; GFNI-NEXT:    andq %rcx, %rdi
 ; GFNI-NEXT:    shlq $4, %rdi
-; GFNI-NEXT:    orq %rax, %rdi
+; GFNI-NEXT:    addq %rax, %rdi
 ; GFNI-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; GFNI-NEXT:    movq %rdi, %rcx
 ; GFNI-NEXT:    andq %rax, %rcx
@@ -209,7 +209,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -231,7 +231,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    shrl $2, %edi
@@ -260,7 +260,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; GFNI-NEXT:    shll $4, %eax
 ; GFNI-NEXT:    shrl $4, %edi
 ; GFNI-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNI-NEXT:    orl %eax, %edi
+; GFNI-NEXT:    addl %eax, %edi
 ; GFNI-NEXT:    movl %edi, %eax
 ; GFNI-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; GFNI-NEXT:    shrl $2, %edi
@@ -288,7 +288,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -311,7 +311,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    shrl $2, %edi
@@ -342,7 +342,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
 ; GFNI-NEXT:    shll $4, %eax
 ; GFNI-NEXT:    shrl $4, %edi
 ; GFNI-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNI-NEXT:    orl %eax, %edi
+; GFNI-NEXT:    addl %eax, %edi
 ; GFNI-NEXT:    movl %edi, %eax
 ; GFNI-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; GFNI-NEXT:    shrl $2, %edi
@@ -371,7 +371,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $3855, %eax # imm = 0xF0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $13107, %ecx # imm = 0x3333
 ; X86-NEXT:    shrl $2, %eax
@@ -394,7 +394,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $3855, %edi # imm = 0xF0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $13107, %eax # imm = 0x3333
 ; X64-NEXT:    shrl $2, %edi
@@ -425,7 +425,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; GFNI-NEXT:    shll $4, %eax
 ; GFNI-NEXT:    shrl $4, %edi
 ; GFNI-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNI-NEXT:    orl %eax, %edi
+; GFNI-NEXT:    addl %eax, %edi
 ; GFNI-NEXT:    movl %edi, %eax
 ; GFNI-NEXT:    andl $13107, %eax # imm = 0x3333
 ; GFNI-NEXT:    shrl $2, %edi
@@ -454,13 +454,13 @@ define i8 @test_bitreverse_i8(i8 %a) {
 ; X86-NEXT:    shlb $2, %cl
 ; X86-NEXT:    shrb $2, %al
 ; X86-NEXT:    andb $51, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andb $85, %cl
 ; X86-NEXT:    addb %cl, %cl
 ; X86-NEXT:    shrb %al
 ; X86-NEXT:    andb $85, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_bitreverse_i8:
@@ -471,13 +471,13 @@ define i8 @test_bitreverse_i8(i8 %a) {
 ; X64-NEXT:    shlb $2, %al
 ; X64-NEXT:    shrb $2, %dil
 ; X64-NEXT:    andb $51, %dil
-; X64-NEXT:    orb %dil, %al
+; X64-NEXT:    addb %dil, %al
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andb $85, %cl
 ; X64-NEXT:    addb %cl, %cl
 ; X64-NEXT:    shrb %al
 ; X64-NEXT:    andb $85, %al
-; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    addb %cl, %al
 ; X64-NEXT:    retq
 ;
 ; X86XOP-LABEL: test_bitreverse_i8:
@@ -496,13 +496,13 @@ define i8 @test_bitreverse_i8(i8 %a) {
 ; GFNI-NEXT:    shlb $2, %al
 ; GFNI-NEXT:    shrb $2, %dil
 ; GFNI-NEXT:    andb $51, %dil
-; GFNI-NEXT:    orb %dil, %al
+; GFNI-NEXT:    addb %dil, %al
 ; GFNI-NEXT:    movl %eax, %ecx
 ; GFNI-NEXT:    andb $85, %cl
 ; GFNI-NEXT:    addb %cl, %cl
 ; GFNI-NEXT:    shrb %al
 ; GFNI-NEXT:    andb $85, %al
-; GFNI-NEXT:    orb %cl, %al
+; GFNI-NEXT:    addb %cl, %al
 ; GFNI-NEXT:    retq
   %b = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %b
@@ -522,12 +522,12 @@ define i4 @test_bitreverse_i4(i4 %a) {
 ; X86-NEXT:    movb %cl, %ah
 ; X86-NEXT:    shlb $3, %ah
 ; X86-NEXT:    andb $8, %ah
-; X86-NEXT:    orb %dl, %ah
+; X86-NEXT:    addb %dl, %ah
 ; X86-NEXT:    shrb %cl
 ; X86-NEXT:    andb $2, %cl
-; X86-NEXT:    orb %ah, %cl
 ; X86-NEXT:    shrb $3, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
+; X86-NEXT:    addb %ah, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_bitreverse_i4:
@@ -539,12 +539,12 @@ define i4 @test_bitreverse_i4(i4 %a) {
 ; X64-NEXT:    andb $4, %cl
 ; X64-NEXT:    leal (,%rdi,8), %edx
 ; X64-NEXT:    andb $8, %dl
-; X64-NEXT:    orb %cl, %dl
+; X64-NEXT:    addb %cl, %dl
 ; X64-NEXT:    shrb %dil
 ; X64-NEXT:    andb $2, %dil
-; X64-NEXT:    orb %dil, %dl
 ; X64-NEXT:    shrb $3, %al
-; X64-NEXT:    orb %dl, %al
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    addb %dl, %al
 ; X64-NEXT:    retq
 ;
 ; X86XOP-LABEL: test_bitreverse_i4:
@@ -565,12 +565,12 @@ define i4 @test_bitreverse_i4(i4 %a) {
 ; GFNI-NEXT:    andb $4, %cl
 ; GFNI-NEXT:    leal (,%rdi,8), %edx
 ; GFNI-NEXT:    andb $8, %dl
-; GFNI-NEXT:    orb %cl, %dl
+; GFNI-NEXT:    addb %cl, %dl
 ; GFNI-NEXT:    shrb %dil
 ; GFNI-NEXT:    andb $2, %dil
-; GFNI-NEXT:    orb %dil, %dl
 ; GFNI-NEXT:    shrb $3, %al
-; GFNI-NEXT:    orb %dl, %al
+; GFNI-NEXT:    addb %dil, %al
+; GFNI-NEXT:    addb %dl, %al
 ; GFNI-NEXT:    retq
   %b = call i4 @llvm.bitreverse.i4(i4 %a)
   ret i4 %b
@@ -723,7 +723,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ebp
 ; X86-NEXT:    shrl $4, %ebx
 ; X86-NEXT:    andl $252645135, %ebx # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ebp, %ebx
+; X86-NEXT:    addl %ebp, %ebx
 ; X86-NEXT:    movl %ebx, %ebp
 ; X86-NEXT:    andl $858993459, %ebp # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %ebx
@@ -740,7 +740,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ebx
 ; X86-NEXT:    shrl $4, %edi
 ; X86-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ebx, %edi
+; X86-NEXT:    addl %ebx, %edi
 ; X86-NEXT:    movl %edi, %ebx
 ; X86-NEXT:    andl $858993459, %ebx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %edi
@@ -758,7 +758,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %edi
 ; X86-NEXT:    shrl $4, %esi
 ; X86-NEXT:    andl $252645135, %esi # imm = 0xF0F0F0F
-; X86-NEXT:    orl %edi, %esi
+; X86-NEXT:    addl %edi, %esi
 ; X86-NEXT:    movl %esi, %edi
 ; X86-NEXT:    andl $858993459, %edi # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %esi
@@ -775,7 +775,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %esi
 ; X86-NEXT:    shrl $4, %edx
 ; X86-NEXT:    andl $252645135, %edx # imm = 0xF0F0F0F
-; X86-NEXT:    orl %esi, %edx
+; X86-NEXT:    addl %esi, %edx
 ; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    andl $858993459, %esi # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %edx
@@ -793,7 +793,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %edx
 ; X86-NEXT:    shrl $4, %ecx
 ; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
-; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    addl %edx, %ecx
 ; X86-NEXT:    movl %ecx, %edx
 ; X86-NEXT:    andl $858993459, %edx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %ecx
@@ -811,7 +811,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -830,7 +830,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -849,7 +849,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -868,7 +868,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -887,7 +887,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -905,7 +905,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -924,7 +924,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -943,7 +943,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -962,7 +962,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -981,7 +981,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -1000,7 +1000,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -1019,7 +1019,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -1128,7 +1128,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r10
 ; X64-NEXT:    andq %r11, %rdi
 ; X64-NEXT:    shlq $4, %rdi
-; X64-NEXT:    orq %r10, %rdi
+; X64-NEXT:    addq %r10, %rdi
 ; X64-NEXT:    movabsq $3689348814741910323, %r10 # imm = 0x3333333333333333
 ; X64-NEXT:    movq %rdi, %r14
 ; X64-NEXT:    andq %r10, %r14
@@ -1147,7 +1147,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r14
 ; X64-NEXT:    andq %r11, %rbx
 ; X64-NEXT:    shlq $4, %rbx
-; X64-NEXT:    orq %r14, %rbx
+; X64-NEXT:    addq %r14, %rbx
 ; X64-NEXT:    movq %rbx, %r14
 ; X64-NEXT:    andq %r10, %r14
 ; X64-NEXT:    shrq $2, %rbx
@@ -1166,7 +1166,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %r15
 ; X64-NEXT:    shlq $4, %r15
-; X64-NEXT:    orq %r13, %r15
+; X64-NEXT:    addq %r13, %r15
 ; X64-NEXT:    movq %r15, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %r15
@@ -1184,7 +1184,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %r12
 ; X64-NEXT:    shlq $4, %r12
-; X64-NEXT:    orq %r13, %r12
+; X64-NEXT:    addq %r13, %r12
 ; X64-NEXT:    movq %r12, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %r12
@@ -1202,7 +1202,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %r9
 ; X64-NEXT:    shlq $4, %r9
-; X64-NEXT:    orq %r13, %r9
+; X64-NEXT:    addq %r13, %r9
 ; X64-NEXT:    movq %r9, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %r9
@@ -1220,7 +1220,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %r8
 ; X64-NEXT:    shlq $4, %r8
-; X64-NEXT:    orq %r13, %r8
+; X64-NEXT:    addq %r13, %r8
 ; X64-NEXT:    movq %r8, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %r8
@@ -1238,7 +1238,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %rcx
 ; X64-NEXT:    shlq $4, %rcx
-; X64-NEXT:    orq %r13, %rcx
+; X64-NEXT:    addq %r13, %rcx
 ; X64-NEXT:    movq %rcx, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %rcx
@@ -1256,7 +1256,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %rdx
 ; X64-NEXT:    shlq $4, %rdx
-; X64-NEXT:    orq %r13, %rdx
+; X64-NEXT:    addq %r13, %rdx
 ; X64-NEXT:    movq %rdx, %r13
 ; X64-NEXT:    andq %r10, %r13
 ; X64-NEXT:    shrq $2, %rdx
@@ -1274,7 +1274,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; X64-NEXT:    andq %r11, %r13
 ; X64-NEXT:    andq %r11, %rsi
 ; X64-NEXT:    shlq $4, %rsi
-; X64-NEXT:    orq %r13, %rsi
+; X64-NEXT:    addq %r13, %rsi
 ; X64-NEXT:    movq %rsi, %r11
 ; X64-NEXT:    andq %r10, %r11
 ; X64-NEXT:    shrq $2, %rsi
@@ -1429,7 +1429,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r10
 ; GFNI-NEXT:    andq %r11, %rdi
 ; GFNI-NEXT:    shlq $4, %rdi
-; GFNI-NEXT:    orq %r10, %rdi
+; GFNI-NEXT:    addq %r10, %rdi
 ; GFNI-NEXT:    movabsq $3689348814741910323, %r10 # imm = 0x3333333333333333
 ; GFNI-NEXT:    movq %rdi, %r14
 ; GFNI-NEXT:    andq %r10, %r14
@@ -1448,7 +1448,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r14
 ; GFNI-NEXT:    andq %r11, %rbx
 ; GFNI-NEXT:    shlq $4, %rbx
-; GFNI-NEXT:    orq %r14, %rbx
+; GFNI-NEXT:    addq %r14, %rbx
 ; GFNI-NEXT:    movq %rbx, %r14
 ; GFNI-NEXT:    andq %r10, %r14
 ; GFNI-NEXT:    shrq $2, %rbx
@@ -1467,7 +1467,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %r15
 ; GFNI-NEXT:    shlq $4, %r15
-; GFNI-NEXT:    orq %r13, %r15
+; GFNI-NEXT:    addq %r13, %r15
 ; GFNI-NEXT:    movq %r15, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %r15
@@ -1485,7 +1485,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %r12
 ; GFNI-NEXT:    shlq $4, %r12
-; GFNI-NEXT:    orq %r13, %r12
+; GFNI-NEXT:    addq %r13, %r12
 ; GFNI-NEXT:    movq %r12, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %r12
@@ -1503,7 +1503,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %r9
 ; GFNI-NEXT:    shlq $4, %r9
-; GFNI-NEXT:    orq %r13, %r9
+; GFNI-NEXT:    addq %r13, %r9
 ; GFNI-NEXT:    movq %r9, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %r9
@@ -1521,7 +1521,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %r8
 ; GFNI-NEXT:    shlq $4, %r8
-; GFNI-NEXT:    orq %r13, %r8
+; GFNI-NEXT:    addq %r13, %r8
 ; GFNI-NEXT:    movq %r8, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %r8
@@ -1539,7 +1539,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %rcx
 ; GFNI-NEXT:    shlq $4, %rcx
-; GFNI-NEXT:    orq %r13, %rcx
+; GFNI-NEXT:    addq %r13, %rcx
 ; GFNI-NEXT:    movq %rcx, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %rcx
@@ -1557,7 +1557,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %rdx
 ; GFNI-NEXT:    shlq $4, %rdx
-; GFNI-NEXT:    orq %r13, %rdx
+; GFNI-NEXT:    addq %r13, %rdx
 ; GFNI-NEXT:    movq %rdx, %r13
 ; GFNI-NEXT:    andq %r10, %r13
 ; GFNI-NEXT:    shrq $2, %rdx
@@ -1575,7 +1575,7 @@ define i528 @large_promotion(i528 %A) nounwind {
 ; GFNI-NEXT:    andq %r11, %r13
 ; GFNI-NEXT:    andq %r11, %rsi
 ; GFNI-NEXT:    shlq $4, %rsi
-; GFNI-NEXT:    orq %r13, %rsi
+; GFNI-NEXT:    addq %r13, %rsi
 ; GFNI-NEXT:    movq %rsi, %r11
 ; GFNI-NEXT:    andq %r10, %r11
 ; GFNI-NEXT:    shrq $2, %rsi
diff --git a/llvm/test/CodeGen/X86/bitselect.ll b/llvm/test/CodeGen/X86/bitselect.ll
index 2922113b14ea90..fc510ad21a0ab1 100644
--- a/llvm/test/CodeGen/X86/bitselect.ll
+++ b/llvm/test/CodeGen/X86/bitselect.ll
@@ -23,7 +23,7 @@ define i8 @bitselect_i8(i8 %a, i8 %b, i8 %m) nounwind {
 ; X64-NEXT:    movl %edx, %eax
 ; X64-NEXT:    notb %al
 ; X64-NEXT:    andb %dil, %al
-; X64-NEXT:    orb %sil, %al
+; X64-NEXT:    addb %sil, %al
 ; X64-NEXT:    retq
   %not = xor i8 %m, -1
   %ma = and i8 %a, %not
@@ -45,11 +45,12 @@ define i16 @bitselect_i16(i16 %a, i16 %b, i16 %m) nounwind {
 ;
 ; X64-NOBMI-LABEL: bitselect_i16:
 ; X64-NOBMI:       # %bb.0:
-; X64-NOBMI-NEXT:    movl %edx, %eax
+; X64-NOBMI-NEXT:    # kill: def $edx killed $edx def $rdx
+; X64-NOBMI-NEXT:    # kill: def $esi killed $esi def $rsi
 ; X64-NOBMI-NEXT:    andl %edx, %esi
-; X64-NOBMI-NEXT:    notl %eax
-; X64-NOBMI-NEXT:    andl %edi, %eax
-; X64-NOBMI-NEXT:    orl %esi, %eax
+; X64-NOBMI-NEXT:    notl %edx
+; X64-NOBMI-NEXT:    andl %edi, %edx
+; X64-NOBMI-NEXT:    leal (%rdx,%rsi), %eax
 ; X64-NOBMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NOBMI-NEXT:    retq
 ;
@@ -57,7 +58,7 @@ define i16 @bitselect_i16(i16 %a, i16 %b, i16 %m) nounwind {
 ; X64-BMI:       # %bb.0:
 ; X64-BMI-NEXT:    andnl %edi, %edx, %eax
 ; X64-BMI-NEXT:    andl %edx, %esi
-; X64-BMI-NEXT:    orl %esi, %eax
+; X64-BMI-NEXT:    addl %esi, %eax
 ; X64-BMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-BMI-NEXT:    retq
   %not = xor i16 %m, -1
@@ -89,7 +90,7 @@ define i32 @bitselect_i32(i32 %a, i32 %b, i32 %m) nounwind {
 ; X64-BMI:       # %bb.0:
 ; X64-BMI-NEXT:    andnl %edi, %edx, %eax
 ; X64-BMI-NEXT:    andl %edx, %esi
-; X64-BMI-NEXT:    orl %esi, %eax
+; X64-BMI-NEXT:    addl %esi, %eax
 ; X64-BMI-NEXT:    retq
   %not = xor i32 %m, -1
   %ma = and i32 %a, %not
@@ -127,7 +128,7 @@ define i64 @bitselect_i64(i64 %a, i64 %b, i64 %m) nounwind {
 ; X64-BMI:       # %bb.0:
 ; X64-BMI-NEXT:    andnq %rdi, %rdx, %rax
 ; X64-BMI-NEXT:    andq %rdx, %rsi
-; X64-BMI-NEXT:    orq %rsi, %rax
+; X64-BMI-NEXT:    addq %rsi, %rax
 ; X64-BMI-NEXT:    retq
   %not = xor i64 %m, -1
   %ma = and i64 %a, %not
@@ -189,9 +190,9 @@ define i128 @bitselect_i128(i128 %a, i128 %b, i128 %m) nounwind {
 ; X64-BMI-NEXT:    andnq %rsi, %r9, %rsi
 ; X64-BMI-NEXT:    andnq %rdi, %r8, %rax
 ; X64-BMI-NEXT:    andq %r9, %rcx
-; X64-BMI-NEXT:    orq %rcx, %rsi
+; X64-BMI-NEXT:    addq %rcx, %rsi
 ; X64-BMI-NEXT:    andq %r8, %rdx
-; X64-BMI-NEXT:    orq %rdx, %rax
+; X64-BMI-NEXT:    addq %rdx, %rax
 ; X64-BMI-NEXT:    movq %rsi, %rdx
 ; X64-BMI-NEXT:    retq
   %not = xor i128 %m, -1
@@ -228,7 +229,7 @@ define i32 @bitselect_constants_i32(i32 %m) nounwind {
 ; X64-BMI-NEXT:    notl %eax
 ; X64-BMI-NEXT:    andl $52, %eax
 ; X64-BMI-NEXT:    andl $-6553, %edi # imm = 0xE667
-; X64-BMI-NEXT:    orl %edi, %eax
+; X64-BMI-NEXT:    addl %edi, %eax
 ; X64-BMI-NEXT:    retq
   %not = xor i32 %m, -1
   %ma = and i32 52, %not
diff --git a/llvm/test/CodeGen/X86/bool-math.ll b/llvm/test/CodeGen/X86/bool-math.ll
index b73af677bc6cdd..79ead34fd83a66 100644
--- a/llvm/test/CodeGen/X86/bool-math.ll
+++ b/llvm/test/CodeGen/X86/bool-math.ll
@@ -14,7 +14,7 @@ define i32 @sub_zext_cmp_mask_same_size_result(i32 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    orl $-28, %eax
+; X32-NEXT:    addl $-28, %eax
 ; X32-NEXT:    retl
   %a = and i32 %x, 1
   %c = icmp eq i32 %a, 0
@@ -35,7 +35,7 @@ define i32 @sub_zext_cmp_mask_wider_result(i8 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    orl $26, %eax
+; X32-NEXT:    addl $26, %eax
 ; X32-NEXT:    retl
   %a = and i8 %x, 1
   %c = icmp eq i8 %a, 0
@@ -57,7 +57,7 @@ define i8 @sub_zext_cmp_mask_narrower_result(i32 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andb $1, %al
-; X32-NEXT:    orb $46, %al
+; X32-NEXT:    addb $46, %al
 ; X32-NEXT:    retl
   %a = and i32 %x, 1
   %c = icmp eq i32 %a, 0
@@ -143,7 +143,7 @@ define i32 @low_bit_select_constants_bigger_false_same_size_result(i32 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    orl $42, %eax
+; X32-NEXT:    addl $42, %eax
 ; X32-NEXT:    retl
   %a = and i32 %x, 1
   %c = icmp eq i32 %a, 0
@@ -163,7 +163,7 @@ define i64 @low_bit_select_constants_bigger_false_wider_result(i32 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    orl $26, %eax
+; X32-NEXT:    addl $26, %eax
 ; X32-NEXT:    xorl %edx, %edx
 ; X32-NEXT:    retl
   %a = and i32 %x, 1
@@ -185,7 +185,7 @@ define i16 @low_bit_select_constants_bigger_false_narrower_result(i32 %x) {
 ; X32:       # %bb.0:
 ; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    orl $36, %eax
+; X32-NEXT:    addl $36, %eax
 ; X32-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X32-NEXT:    retl
   %a = and i32 %x, 1
diff --git a/llvm/test/CodeGen/X86/bool-vector.ll b/llvm/test/CodeGen/X86/bool-vector.ll
index e4deb878aa461d..af87fb0377c52e 100644
--- a/llvm/test/CodeGen/X86/bool-vector.ll
+++ b/llvm/test/CodeGen/X86/bool-vector.ll
@@ -16,12 +16,12 @@ define i32 @PR15215_bad(<4 x i32> %input) {
 ; X86-NEXT:    shlb $3, %ah
 ; X86-NEXT:    andb $1, %cl
 ; X86-NEXT:    shlb $2, %cl
-; X86-NEXT:    orb %ah, %cl
+; X86-NEXT:    addb %ah, %cl
 ; X86-NEXT:    addb %dl, %dl
 ; X86-NEXT:    andb $1, %al
-; X86-NEXT:    orb %dl, %al
+; X86-NEXT:    addb %dl, %al
 ; X86-NEXT:    andb $3, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    movzbl %al, %eax
 ; X86-NEXT:    andl $15, %eax
 ; X86-NEXT:    retl
@@ -31,12 +31,12 @@ define i32 @PR15215_bad(<4 x i32> %input) {
 ; X64-NEXT:    shlb $3, %cl
 ; X64-NEXT:    andb $1, %dl
 ; X64-NEXT:    shlb $2, %dl
-; X64-NEXT:    orb %cl, %dl
+; X64-NEXT:    addb %cl, %dl
 ; X64-NEXT:    addb %sil, %sil
 ; X64-NEXT:    andb $1, %dil
-; X64-NEXT:    orb %sil, %dil
+; X64-NEXT:    addb %sil, %dil
 ; X64-NEXT:    andb $3, %dil
-; X64-NEXT:    orb %dl, %dil
+; X64-NEXT:    addb %dl, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    andl $15, %eax
 ; X64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/bswap.ll b/llvm/test/CodeGen/X86/bswap.ll
index 81eac5676bb5c3..fc47013e1c571a 100644
--- a/llvm/test/CodeGen/X86/bswap.ll
+++ b/llvm/test/CodeGen/X86/bswap.ll
@@ -159,7 +159,7 @@ define i64 @not_bswap() {
 ; CHECK-NEXT:    movl %eax, %ecx
 ; CHECK-NEXT:    shrl $8, %ecx
 ; CHECK-NEXT:    shll $8, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    xorl %edx, %edx
 ; CHECK-NEXT:    retl
 ;
@@ -169,7 +169,7 @@ define i64 @not_bswap() {
 ; CHECK64-NEXT:    movl %eax, %ecx
 ; CHECK64-NEXT:    shrl $8, %ecx
 ; CHECK64-NEXT:    shll $8, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %init = load i16, ptr @var16
   %big = zext i16 %init to i64
diff --git a/llvm/test/CodeGen/X86/bswap_tree2.ll b/llvm/test/CodeGen/X86/bswap_tree2.ll
index 98b51460207c31..01550001a2c163 100644
--- a/llvm/test/CodeGen/X86/bswap_tree2.ll
+++ b/llvm/test/CodeGen/X86/bswap_tree2.ll
@@ -58,8 +58,8 @@ define i32 @test2(i32 %x) nounwind {
 ; CHECK-NEXT:    andl $65280, %edx # imm = 0xFF00
 ; CHECK-NEXT:    andl $-16777216, %ecx # imm = 0xFF000000
 ; CHECK-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; CHECK-NEXT:    orl %ecx, %eax
-; CHECK-NEXT:    orl %edx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
+; CHECK-NEXT:    addl %edx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: test2:
@@ -71,8 +71,8 @@ define i32 @test2(i32 %x) nounwind {
 ; CHECK64-NEXT:    andl $65280, %ecx # imm = 0xFF00
 ; CHECK64-NEXT:    andl $-16777216, %edi # imm = 0xFF000000
 ; CHECK64-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; CHECK64-NEXT:    orl %edi, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %edi, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %byte1 = lshr i32 %x, 8
   %byte0 = shl  i32 %x, 8
@@ -109,8 +109,8 @@ define i32 @test3(float %x) nounwind {
 ; CHECK-NEXT:    andl $65280, %ecx # imm = 0xFF00
 ; CHECK-NEXT:    andl $-16777216, %edx # imm = 0xFF000000
 ; CHECK-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; CHECK-NEXT:    orl %edx, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    addl $8, %esp
 ; CHECK-NEXT:    retl
 ;
@@ -124,8 +124,8 @@ define i32 @test3(float %x) nounwind {
 ; CHECK64-NEXT:    andl $65280, %ecx # imm = 0xFF00
 ; CHECK64-NEXT:    andl $-16777216, %edx # imm = 0xFF000000
 ; CHECK64-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; CHECK64-NEXT:    orl %edx, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %edx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %integer = fptosi float %x to i32
   %byte0 = shl  i32 %integer, 8
diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index ae70b6a5a46656..8ad6ef8e0ccc54 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -661,7 +661,7 @@ define <16 x i8> @test_buildvector_v16i8_register_zero_2(i8 %a2, i8 %a3, i8 %a6,
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shll $8, %esi
 ; SSE2-NEXT:    movzbl %dil, %eax
-; SSE2-NEXT:    orl %esi, %eax
+; SSE2-NEXT:    addl %esi, %eax
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    pinsrw $1, %eax, %xmm0
 ; SSE2-NEXT:    movzbl %dl, %eax
@@ -831,7 +831,7 @@ define void @pr59781(ptr %in, ptr %out) {
 ; CHECK-NEXT:    movzwl (%rdi), %eax
 ; CHECK-NEXT:    movzbl 2(%rdi), %ecx
 ; CHECK-NEXT:    shll $16, %ecx
-; CHECK-NEXT:    orq %rax, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
 ; CHECK-NEXT:    movq %rcx, (%rsi)
 ; CHECK-NEXT:    retq
   %bf.load = load i24, ptr %in, align 8
diff --git a/llvm/test/CodeGen/X86/clz.ll b/llvm/test/CodeGen/X86/clz.ll
index 92cbc165902473..c70b205eef6880 100644
--- a/llvm/test/CodeGen/X86/clz.ll
+++ b/llvm/test/CodeGen/X86/clz.ll
@@ -1246,7 +1246,7 @@ define i64 @ctlz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-CLZ-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-CLZ-NEXT:    orl $1, %eax
 ; X86-CLZ-NEXT:    lzcntl %eax, %eax
-; X86-CLZ-NEXT:    orl $32, %eax
+; X86-CLZ-NEXT:    addl $32, %eax
 ; X86-CLZ-NEXT:    xorl %edx, %edx
 ; X86-CLZ-NEXT:    retl
 ; X86-CLZ-NEXT:  .LBB21_1:
@@ -1275,7 +1275,7 @@ define i64 @ctlz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-FASTLZCNT-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-FASTLZCNT-NEXT:    orl $1, %eax
 ; X86-FASTLZCNT-NEXT:    lzcntl %eax, %eax
-; X86-FASTLZCNT-NEXT:    orl $32, %eax
+; X86-FASTLZCNT-NEXT:    addl $32, %eax
 ; X86-FASTLZCNT-NEXT:    xorl %edx, %edx
 ; X86-FASTLZCNT-NEXT:    retl
 ; X86-FASTLZCNT-NEXT:  .LBB21_1:
@@ -1299,7 +1299,7 @@ define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-NOCMOV-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X86-NOCMOV-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-NOCMOV-NEXT:    rep bsfl %eax, %eax
-; X86-NOCMOV-NEXT:    orl $32, %eax
+; X86-NOCMOV-NEXT:    addl $32, %eax
 ; X86-NOCMOV-NEXT:    xorl %edx, %edx
 ; X86-NOCMOV-NEXT:    retl
 ; X86-NOCMOV-NEXT:  .LBB22_1:
@@ -1314,7 +1314,7 @@ define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-CMOV-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-CMOV-NEXT:    rep bsfl %ecx, %edx
 ; X86-CMOV-NEXT:    rep bsfl %eax, %eax
-; X86-CMOV-NEXT:    orl $32, %eax
+; X86-CMOV-NEXT:    addl $32, %eax
 ; X86-CMOV-NEXT:    testl %ecx, %ecx
 ; X86-CMOV-NEXT:    cmovnel %edx, %eax
 ; X86-CMOV-NEXT:    xorl %edx, %edx
@@ -1336,7 +1336,7 @@ define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-CLZ-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X86-CLZ-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-CLZ-NEXT:    tzcntl %eax, %eax
-; X86-CLZ-NEXT:    orl $32, %eax
+; X86-CLZ-NEXT:    addl $32, %eax
 ; X86-CLZ-NEXT:    xorl %edx, %edx
 ; X86-CLZ-NEXT:    retl
 ; X86-CLZ-NEXT:  .LBB22_1:
@@ -1367,7 +1367,7 @@ define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
 ; X86-FASTLZCNT-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X86-FASTLZCNT-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; X86-FASTLZCNT-NEXT:    tzcntl %eax, %eax
-; X86-FASTLZCNT-NEXT:    orl $32, %eax
+; X86-FASTLZCNT-NEXT:    addl $32, %eax
 ; X86-FASTLZCNT-NEXT:    xorl %edx, %edx
 ; X86-FASTLZCNT-NEXT:    retl
 ; X86-FASTLZCNT-NEXT:  .LBB22_1:
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
index 9f81fab54a49d0..0503a94f2a6b57 100644
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -50,7 +50,7 @@ define i32 @test_bitreverse_srli_bitreverse(i32 %a0) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -68,7 +68,7 @@ define i32 @test_bitreverse_srli_bitreverse(i32 %a0) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993424, %ecx # imm = 0x33333310
 ; X86-NEXT:    shrl $2, %eax
@@ -90,7 +90,7 @@ define i32 @test_bitreverse_srli_bitreverse(i32 %a0) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    shrl $2, %edi
@@ -108,7 +108,7 @@ define i32 @test_bitreverse_srli_bitreverse(i32 %a0) nounwind {
 ; X64-NEXT:    shll $4, %ecx
 ; X64-NEXT:    shrl $4, %eax
 ; X64-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
-; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andl $858993424, %ecx # imm = 0x33333310
 ; X64-NEXT:    shrl $2, %eax
@@ -136,7 +136,7 @@ define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -154,7 +154,7 @@ define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645127, %eax # imm = 0xF0F0F07
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -177,7 +177,7 @@ define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
 ; X64-NEXT:    andq %rcx, %rax
 ; X64-NEXT:    andq %rcx, %rdi
 ; X64-NEXT:    shlq $4, %rdi
-; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    addq %rax, %rdi
 ; X64-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; X64-NEXT:    movq %rdi, %rcx
 ; X64-NEXT:    andq %rax, %rcx
@@ -198,7 +198,7 @@ define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
 ; X64-NEXT:    movabsq $1085102557958766592, %rdx # imm = 0xF0F0F0700000000
 ; X64-NEXT:    andq %rax, %rdx
 ; X64-NEXT:    shlq $4, %rcx
-; X64-NEXT:    orq %rdx, %rcx
+; X64-NEXT:    addq %rdx, %rcx
 ; X64-NEXT:    movabsq $3689348813882916864, %rax # imm = 0x3333333300000000
 ; X64-NEXT:    andq %rcx, %rax
 ; X64-NEXT:    shrq $2, %rcx
@@ -229,7 +229,7 @@ define i32 @test_bitreverse_shli_bitreverse(i32 %a0) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -240,14 +240,14 @@ define i32 @test_bitreverse_shli_bitreverse(i32 %a0) nounwind {
 ; X86-NEXT:    shll $6, %ecx
 ; X86-NEXT:    andl $-1431655808, %ecx # imm = 0xAAAAAA80
 ; X86-NEXT:    shll $8, %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    bswapl %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $986895, %ecx # imm = 0xF0F0F
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $135204623, %eax # imm = 0x80F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $3355443, %ecx # imm = 0x333333
 ; X86-NEXT:    shrl $2, %eax
@@ -269,7 +269,7 @@ define i32 @test_bitreverse_shli_bitreverse(i32 %a0) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    shrl $2, %edi
@@ -280,14 +280,14 @@ define i32 @test_bitreverse_shli_bitreverse(i32 %a0) nounwind {
 ; X64-NEXT:    shll $6, %eax
 ; X64-NEXT:    andl $-1431655808, %eax # imm = 0xAAAAAA80
 ; X64-NEXT:    shll $8, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    bswapl %ecx
 ; X64-NEXT:    movl %ecx, %eax
 ; X64-NEXT:    andl $986895, %eax # imm = 0xF0F0F
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %ecx
 ; X64-NEXT:    andl $135204623, %ecx # imm = 0x80F0F0F
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    movl %ecx, %eax
 ; X64-NEXT:    andl $3355443, %eax # imm = 0x333333
 ; X64-NEXT:    shrl $2, %ecx
@@ -315,7 +315,7 @@ define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
 ; X86-NEXT:    shrl $2, %eax
@@ -331,7 +331,7 @@ define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    shrl $4, %eax
 ; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    andl $590558003, %ecx # imm = 0x23333333
 ; X86-NEXT:    shrl $2, %eax
@@ -353,7 +353,7 @@ define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
 ; X64-NEXT:    shll $4, %eax
 ; X64-NEXT:    shrl $4, %edi
 ; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    addl %eax, %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    shrl $2, %edi
@@ -371,7 +371,7 @@ define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
 ; X64-NEXT:    shll $4, %ecx
 ; X64-NEXT:    shrl $4, %eax
 ; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andl $590558003, %ecx # imm = 0x23333333
 ; X64-NEXT:    shrl $2, %eax
diff --git a/llvm/test/CodeGen/X86/combine-bswap.ll b/llvm/test/CodeGen/X86/combine-bswap.ll
index 1f074c877f3ae1..03e9d83d2ea8af 100644
--- a/llvm/test/CodeGen/X86/combine-bswap.ll
+++ b/llvm/test/CodeGen/X86/combine-bswap.ll
@@ -448,7 +448,7 @@ define i64 @test_bswap64_shift48_multiuse(i64 %a0, ptr %a1) {
 ; X64-NEXT:    shlq $48, %rdi
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    bswapq %rax
-; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    addq %rax, %rdi
 ; X64-NEXT:    movq %rdi, (%rsi)
 ; X64-NEXT:    retq
   %s = shl i64 %a0, 48
diff --git a/llvm/test/CodeGen/X86/combine-fneg.ll b/llvm/test/CodeGen/X86/combine-fneg.ll
index e4a07348dc96c5..4a7712ef61fa55 100644
--- a/llvm/test/CodeGen/X86/combine-fneg.ll
+++ b/llvm/test/CodeGen/X86/combine-fneg.ll
@@ -124,9 +124,9 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
 ; X86-SSE1-NEXT:    subl $16, %esp
 ; X86-SSE1-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X86-SSE1-NEXT:    movl 12(%ebp), %ecx
-; X86-SSE1-NEXT:    xorl %eax, %ecx
+; X86-SSE1-NEXT:    addl %eax, %ecx
 ; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    xorl 8(%ebp), %eax
+; X86-SSE1-NEXT:    addl 8(%ebp), %eax
 ; X86-SSE1-NEXT:    movl %eax, (%esp)
 ; X86-SSE1-NEXT:    movaps (%esp), %xmm0
 ; X86-SSE1-NEXT:    movl %ebp, %esp
@@ -137,9 +137,9 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT:    xorl %eax, %ecx
+; X86-SSE2-NEXT:    addl %eax, %ecx
 ; X86-SSE2-NEXT:    movd %ecx, %xmm1
-; X86-SSE2-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movd %eax, %xmm0
 ; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-SSE2-NEXT:    retl
diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll
index 65d74c8f262a31..42454d6b578efa 100644
--- a/llvm/test/CodeGen/X86/combine-rotates.ll
+++ b/llvm/test/CodeGen/X86/combine-rotates.ll
@@ -441,7 +441,7 @@ define i5 @rotl_merge_i5(i5 %x) {
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    andb $24, %al
 ; CHECK-NEXT:    shrb $3, %al
-; CHECK-NEXT:    orb %cl, %al
+; CHECK-NEXT:    addb %cl, %al
 ; CHECK-NEXT:    retq
   %r1 = call i5 @llvm.fshl.i5(i5 %x, i5 %x, i5 -1)
   %r2 = call i5 @llvm.fshl.i5(i5 %r1, i5 %r1, i5 1)
diff --git a/llvm/test/CodeGen/X86/commute-two-addr.ll b/llvm/test/CodeGen/X86/commute-two-addr.ll
index 9a3ac44a195703..f2012056d0acf0 100644
--- a/llvm/test/CodeGen/X86/commute-two-addr.ll
+++ b/llvm/test/CodeGen/X86/commute-two-addr.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; The register allocator can commute two-address instructions to avoid
 ; insertion of register-register copies.
 
@@ -12,10 +13,19 @@ declare void @ext(i32)
 
 define i32 @t1(i32 %X, i32 %Y) nounwind {
 ; LINUX-LABEL: t1:
-; LINUX: movl 4(%esp), %eax
-; LINUX: movl 8(%esp), %ecx
-; LINUX: addl %eax, %ecx
-; LINUX: movl %ecx, G
+; LINUX:       # %bb.0:
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; LINUX-NEXT:    addl %eax, %ecx
+; LINUX-NEXT:    movl %ecx, G
+; LINUX-NEXT:    retl
+;
+; DARWIN-LABEL: t1:
+; DARWIN:       ## %bb.0:
+; DARWIN-NEXT:    movl %edi, %eax
+; DARWIN-NEXT:    addl %edi, %esi
+; DARWIN-NEXT:    movl %esi, _G(%rip)
+; DARWIN-NEXT:    retq
         %Z = add i32 %X, %Y             ; <i32> [#uses=1]
         store i32 %Z, ptr @G
         ret i32 %X
@@ -23,10 +33,19 @@ define i32 @t1(i32 %X, i32 %Y) nounwind {
 
 define i32 @t2(i32 %X, i32 %Y) nounwind {
 ; LINUX-LABEL: t2:
-; LINUX: movl 4(%esp), %eax
-; LINUX: movl 8(%esp), %ecx
-; LINUX: xorl %eax, %ecx
-; LINUX: movl %ecx, G
+; LINUX:       # %bb.0:
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; LINUX-NEXT:    xorl %eax, %ecx
+; LINUX-NEXT:    movl %ecx, G
+; LINUX-NEXT:    retl
+;
+; DARWIN-LABEL: t2:
+; DARWIN:       ## %bb.0:
+; DARWIN-NEXT:    movl %edi, %eax
+; DARWIN-NEXT:    xorl %edi, %esi
+; DARWIN-NEXT:    movl %esi, _G(%rip)
+; DARWIN-NEXT:    retq
         %Z = xor i32 %X, %Y             ; <i32> [#uses=1]
         store i32 %Z, ptr @G
         ret i32 %X
@@ -36,12 +55,42 @@ define i32 @t2(i32 %X, i32 %Y) nounwind {
 %0 = type { i64, i32 }
 
 define %0 @t3(i32 %lb, i8 zeroext %has_lb, i8 zeroext %lb_inclusive, i32 %ub, i8 zeroext %has_ub, i8 zeroext %ub_inclusive) nounwind {
-entry:
+; LINUX-LABEL: t3:
+; LINUX:       # %bb.0: # %entry
+; LINUX-NEXT:    pushl %edi
+; LINUX-NEXT:    pushl %esi
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; LINUX-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; LINUX-NEXT:    movzbl {{[0-9]+}}(%esp), %esi
+; LINUX-NEXT:    movzbl {{[0-9]+}}(%esp), %edi
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; LINUX-NEXT:    shll $8, %edi
+; LINUX-NEXT:    shll $16, %esi
+; LINUX-NEXT:    addl %ecx, %esi
+; LINUX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; LINUX-NEXT:    shll $24, %ecx
+; LINUX-NEXT:    addl %edi, %ecx
+; LINUX-NEXT:    addl %esi, %ecx
+; LINUX-NEXT:    popl %esi
+; LINUX-NEXT:    popl %edi
+; LINUX-NEXT:    retl
+;
 ; DARWIN-LABEL: t3:
-; DARWIN: shlq $32, %rcx
-; DARWIN-NEXT: orq %rcx, %rax
-; DARWIN-NEXT: shll $8
-; DARWIN-NOT: leaq
+; DARWIN:       ## %bb.0: ## %entry
+; DARWIN-NEXT:    ## kill: def $r9d killed $r9d def $r9
+; DARWIN-NEXT:    ## kill: def $r8d killed $r8d def $r8
+; DARWIN-NEXT:    ## kill: def $ecx killed $ecx def $rcx
+; DARWIN-NEXT:    movl %edi, %eax
+; DARWIN-NEXT:    shlq $32, %rcx
+; DARWIN-NEXT:    addq %rcx, %rax
+; DARWIN-NEXT:    shll $8, %r8d
+; DARWIN-NEXT:    shll $16, %edx
+; DARWIN-NEXT:    shll $24, %r9d
+; DARWIN-NEXT:    addl %esi, %edx
+; DARWIN-NEXT:    addl %r9d, %edx
+; DARWIN-NEXT:    addl %r8d, %edx
+; DARWIN-NEXT:    retq
+entry:
   %tmp21 = zext i32 %lb to i64
   %tmp23 = zext i32 %ub to i64
   %tmp24 = shl i64 %tmp23, 32
diff --git a/llvm/test/CodeGen/X86/dagcombine-select.ll b/llvm/test/CodeGen/X86/dagcombine-select.ll
index 1380c02663ee0e..76754aae43c841 100644
--- a/llvm/test/CodeGen/X86/dagcombine-select.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-select.ll
@@ -183,7 +183,7 @@ define i32 @sel_constants_shl_constant(i1 %cond) {
 ; CHECK-NEXT:    notb %dil
 ; CHECK-NEXT:    movzbl %dil, %eax
 ; CHECK-NEXT:    andl $1, %eax
-; CHECK-NEXT:    orl $2, %eax
+; CHECK-NEXT:    addl $2, %eax
 ; CHECK-NEXT:    shll $8, %eax
 ; CHECK-NEXT:    retq
   %sel = select i1 %cond, i32 2, i32 3
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325dd4c229e..30e538f4d9f154 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -209,7 +209,7 @@ define i64 @fun10(i8 zeroext %v) {
 ; X86-NEXT:    movzbl %al, %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shll $4, %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    retl
 ;
@@ -220,7 +220,7 @@ define i64 @fun10(i8 zeroext %v) {
 ; X64-NEXT:    shrb $4, %al
 ; X64-NEXT:    movzbl %al, %eax
 ; X64-NEXT:    andl $-16, %edi
-; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    retq
 entry:
   %shr = lshr i8 %v, 4
diff --git a/llvm/test/CodeGen/X86/disable-shrink-store.ll b/llvm/test/CodeGen/X86/disable-shrink-store.ll
index 325c870e4a6154..07b606f2613314 100644
--- a/llvm/test/CodeGen/X86/disable-shrink-store.ll
+++ b/llvm/test/CodeGen/X86/disable-shrink-store.ll
@@ -5,7 +5,7 @@ define void @shrink(ptr %ptr) {
 ; CHECK-LABEL: shrink:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    orl $25600, %eax # imm = 0x6400
+; CHECK-NEXT:    addl $25600, %eax # imm = 0x6400
 ; CHECK-NEXT:    movw %ax, (%rdi)
 ; CHECK-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/extract-bits.ll b/llvm/test/CodeGen/X86/extract-bits.ll
index 90e075bfabf0a2..d4acd274db5328 100644
--- a/llvm/test/CodeGen/X86/extract-bits.ll
+++ b/llvm/test/CodeGen/X86/extract-bits.ll
@@ -51,7 +51,7 @@ define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -79,7 +79,7 @@ define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -183,7 +183,7 @@ define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -210,7 +210,7 @@ define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X64-BMI1-LABEL: bextr32_a1_indexzext:
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrl %edx, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -251,7 +251,7 @@ define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -281,7 +281,7 @@ define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -321,7 +321,7 @@ define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -350,7 +350,7 @@ define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X64-BMI1-LABEL: bextr32_a3_load_indexzext:
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrl %edx, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -390,7 +390,7 @@ define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -418,7 +418,7 @@ define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -466,7 +466,7 @@ define i32 @bextr32_a5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl %al, %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, {{[0-9]+}}(%esp), %esi
 ; X86-BMI1-NEXT:    movl %eax, (%esp)
 ; X86-BMI1-NEXT:    calll use32@PLT
@@ -511,7 +511,7 @@ define i32 @bextr32_a5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %ebx
 ; X64-BMI1-NEXT:    movl %esi, %edi
 ; X64-BMI1-NEXT:    callq use32@PLT
@@ -665,7 +665,7 @@ define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -953,7 +953,7 @@ define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrq %rdx, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -1103,7 +1103,7 @@ define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -1250,7 +1250,7 @@ define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrq %rdx, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -1397,7 +1397,7 @@ define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -1584,7 +1584,7 @@ define i64 @bextr64_a5_skipextrauses(i64 %val, i64 %numskipbits, i64 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rbx
 ; X64-BMI1-NEXT:    movq %rsi, %rdi
 ; X64-BMI1-NEXT:    callq use64@PLT
@@ -1718,7 +1718,7 @@ define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -1818,7 +1818,7 @@ define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -2064,7 +2064,7 @@ define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -2190,7 +2190,7 @@ define i32 @bextr64_32_a3(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -2233,7 +2233,7 @@ define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -2261,7 +2261,7 @@ define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2298,7 +2298,7 @@ define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -2325,7 +2325,7 @@ define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X64-BMI1-LABEL: bextr32_b1_indexzext:
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrl %edx, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2366,7 +2366,7 @@ define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -2396,7 +2396,7 @@ define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2436,7 +2436,7 @@ define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -2465,7 +2465,7 @@ define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X64-BMI1-LABEL: bextr32_b3_load_indexzext:
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrl %edx, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2505,7 +2505,7 @@ define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -2533,7 +2533,7 @@ define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2581,7 +2581,7 @@ define i32 @bextr32_b5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl %al, %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, {{[0-9]+}}(%esp), %esi
 ; X86-BMI1-NEXT:    movl %eax, (%esp)
 ; X86-BMI1-NEXT:    calll use32@PLT
@@ -2626,7 +2626,7 @@ define i32 @bextr32_b5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %ebx
 ; X64-BMI1-NEXT:    movl %esi, %edi
 ; X64-BMI1-NEXT:    callq use32@PLT
@@ -2776,7 +2776,7 @@ define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -2914,7 +2914,7 @@ define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrq %rdx, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -3060,7 +3060,7 @@ define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -3203,7 +3203,7 @@ define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroex
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    # kill: def $edx killed $edx def $rdx
 ; X64-BMI1-NEXT:    shll $8, %edx
-; X64-BMI1-NEXT:    orl %esi, %edx
+; X64-BMI1-NEXT:    addl %esi, %edx
 ; X64-BMI1-NEXT:    bextrq %rdx, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -3346,7 +3346,7 @@ define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -3527,7 +3527,7 @@ define i64 @bextr64_b5_skipextrauses(i64 %val, i64 %numskipbits, i64 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rbx
 ; X64-BMI1-NEXT:    movq %rsi, %rdi
 ; X64-BMI1-NEXT:    callq use64@PLT
@@ -3659,7 +3659,7 @@ define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -3760,7 +3760,7 @@ define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -3862,7 +3862,7 @@ define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -3988,7 +3988,7 @@ define i32 @bextr64_32_b3(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -6188,7 +6188,7 @@ define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -6286,7 +6286,7 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -6385,7 +6385,7 @@ define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -6554,7 +6554,7 @@ define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -6582,7 +6582,7 @@ define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -6616,7 +6616,7 @@ define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %eax
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; X86-BMI1-NEXT:    orl %eax, %ecx
+; X86-BMI1-NEXT:    addl %eax, %ecx
 ; X86-BMI1-NEXT:    bextrl %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -6644,7 +6644,7 @@ define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -6682,7 +6682,7 @@ define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -6711,7 +6711,7 @@ define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -6748,7 +6748,7 @@ define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, (%eax), %eax
 ; X86-BMI1-NEXT:    retl
 ;
@@ -6777,7 +6777,7 @@ define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, (%rdi), %eax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -6825,7 +6825,7 @@ define i32 @bextr32_d5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-BMI1-NEXT:    shll $8, %ecx
 ; X86-BMI1-NEXT:    movzbl %al, %edx
-; X86-BMI1-NEXT:    orl %ecx, %edx
+; X86-BMI1-NEXT:    addl %ecx, %edx
 ; X86-BMI1-NEXT:    bextrl %edx, {{[0-9]+}}(%esp), %esi
 ; X86-BMI1-NEXT:    movl %eax, (%esp)
 ; X86-BMI1-NEXT:    calll use32@PLT
@@ -6870,7 +6870,7 @@ define i32 @bextr32_d5_skipextrauses(i32 %val, i32 %numskipbits, i32 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrl %eax, %edi, %ebx
 ; X64-BMI1-NEXT:    movl %esi, %edi
 ; X64-BMI1-NEXT:    callq use32@PLT
@@ -7052,7 +7052,7 @@ define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -7222,7 +7222,7 @@ define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -7399,7 +7399,7 @@ define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -7573,7 +7573,7 @@ define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, (%rdi), %rax
 ; X64-BMI1-NEXT:    retq
 ;
@@ -7790,7 +7790,7 @@ define i64 @bextr64_d5_skipextrauses(i64 %val, i64 %numskipbits, i64 %numlowbits
 ; X64-BMI1-NEXT:    pushq %rbx
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rbx
 ; X64-BMI1-NEXT:    movq %rsi, %rdi
 ; X64-BMI1-NEXT:    callq use64@PLT
@@ -7938,7 +7938,7 @@ define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
@@ -8037,7 +8037,7 @@ define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; X64-BMI1:       # %bb.0:
 ; X64-BMI1-NEXT:    shll $8, %edx
 ; X64-BMI1-NEXT:    movzbl %sil, %eax
-; X64-BMI1-NEXT:    orl %edx, %eax
+; X64-BMI1-NEXT:    addl %edx, %eax
 ; X64-BMI1-NEXT:    bextrq %rax, %rdi, %rax
 ; X64-BMI1-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-BMI1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/fold-masked-merge.ll b/llvm/test/CodeGen/X86/fold-masked-merge.ll
index 135494ac25f8cb..5a7f034b988407 100644
--- a/llvm/test/CodeGen/X86/fold-masked-merge.ll
+++ b/llvm/test/CodeGen/X86/fold-masked-merge.ll
@@ -18,7 +18,7 @@ define i32 @masked_merge0(i32 %a0, i32 %a1, i32 %a2) {
 ; BMI:       # %bb.0:
 ; BMI-NEXT:    andl %edi, %esi
 ; BMI-NEXT:    andnl %edx, %edi, %eax
-; BMI-NEXT:    orl %esi, %eax
+; BMI-NEXT:    addl %esi, %eax
 ; BMI-NEXT:    retq
   %and0 = and i32 %a0, %a1
   %not = xor i32 %a0, -1
@@ -30,11 +30,12 @@ define i32 @masked_merge0(i32 %a0, i32 %a1, i32 %a2) {
 define i16 @masked_merge1(i16 %a0, i16 %a1, i16 %a2) {
 ; NOBMI-LABEL: masked_merge1:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movl %edi, %eax
+; NOBMI-NEXT:    # kill: def $esi killed $esi def $rsi
+; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
 ; NOBMI-NEXT:    andl %edi, %esi
-; NOBMI-NEXT:    notl %eax
-; NOBMI-NEXT:    andl %edx, %eax
-; NOBMI-NEXT:    orl %esi, %eax
+; NOBMI-NEXT:    notl %edi
+; NOBMI-NEXT:    andl %edx, %edi
+; NOBMI-NEXT:    leal (%rdi,%rsi), %eax
 ; NOBMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; NOBMI-NEXT:    retq
 ;
@@ -42,7 +43,7 @@ define i16 @masked_merge1(i16 %a0, i16 %a1, i16 %a2) {
 ; BMI:       # %bb.0:
 ; BMI-NEXT:    andl %edi, %esi
 ; BMI-NEXT:    andnl %edx, %edi, %eax
-; BMI-NEXT:    orl %esi, %eax
+; BMI-NEXT:    addl %esi, %eax
 ; BMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; BMI-NEXT:    retq
   %and0 = and i16 %a0, %a1
@@ -92,7 +93,7 @@ define i64 @masked_merge3(i64 %a0, i64 %a1, i64 %a2) {
 ; BMI-NEXT:    notq %rdx
 ; BMI-NEXT:    andnq %rdx, %rdi, %rcx
 ; BMI-NEXT:    andnq %rdi, %rsi, %rax
-; BMI-NEXT:    orq %rcx, %rax
+; BMI-NEXT:    addq %rcx, %rax
 ; BMI-NEXT:    retq
   %v0 = xor i64 %a1, -1
   %v1 = xor i64 %a2, -1
@@ -203,11 +204,12 @@ define i32 @not_a_masked_merge4(i32 %a0, i32 %a1, i32 %a2) {
 define i32 @masked_merge_no_transform0(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 ; NOBMI-LABEL: masked_merge_no_transform0:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movl %edi, %eax
+; NOBMI-NEXT:    # kill: def $esi killed $esi def $rsi
+; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
 ; NOBMI-NEXT:    andl %edi, %esi
-; NOBMI-NEXT:    notl %eax
-; NOBMI-NEXT:    andl %edx, %eax
-; NOBMI-NEXT:    orl %esi, %eax
+; NOBMI-NEXT:    notl %edi
+; NOBMI-NEXT:    andl %edx, %edi
+; NOBMI-NEXT:    leal (%rdi,%rsi), %eax
 ; NOBMI-NEXT:    movl %esi, (%rcx)
 ; NOBMI-NEXT:    retq
 ;
@@ -215,7 +217,7 @@ define i32 @masked_merge_no_transform0(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 ; BMI:       # %bb.0:
 ; BMI-NEXT:    andl %edi, %esi
 ; BMI-NEXT:    andnl %edx, %edi, %eax
-; BMI-NEXT:    orl %esi, %eax
+; BMI-NEXT:    addl %esi, %eax
 ; BMI-NEXT:    movl %esi, (%rcx)
 ; BMI-NEXT:    retq
   %and0 = and i32 %a0, %a1
@@ -230,11 +232,12 @@ define i32 @masked_merge_no_transform0(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 define i32 @masked_merge_no_transform1(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 ; NOBMI-LABEL: masked_merge_no_transform1:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movl %edx, %eax
+; NOBMI-NEXT:    # kill: def $edx killed $edx def $rdx
+; NOBMI-NEXT:    # kill: def $esi killed $esi def $rsi
 ; NOBMI-NEXT:    andl %edi, %esi
 ; NOBMI-NEXT:    notl %edi
-; NOBMI-NEXT:    andl %edi, %eax
-; NOBMI-NEXT:    orl %esi, %eax
+; NOBMI-NEXT:    andl %edi, %edx
+; NOBMI-NEXT:    leal (%rdx,%rsi), %eax
 ; NOBMI-NEXT:    movl %edi, (%rcx)
 ; NOBMI-NEXT:    retq
 ;
@@ -243,7 +246,7 @@ define i32 @masked_merge_no_transform1(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 ; BMI-NEXT:    andl %edi, %esi
 ; BMI-NEXT:    andnl %edx, %edi, %eax
 ; BMI-NEXT:    notl %edi
-; BMI-NEXT:    orl %esi, %eax
+; BMI-NEXT:    addl %esi, %eax
 ; BMI-NEXT:    movl %edi, (%rcx)
 ; BMI-NEXT:    retq
   %and0 = and i32 %a0, %a1
@@ -258,20 +261,21 @@ define i32 @masked_merge_no_transform1(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 define i32 @masked_merge_no_transform2(i32 %a0, i32 %a1, i32 %a2, ptr %p1) {
 ; NOBMI-LABEL: masked_merge_no_transform2:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movl %esi, %eax
-; NOBMI-NEXT:    andl %edi, %eax
+; NOBMI-NEXT:    # kill: def $esi killed $esi def $rsi
+; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
+; NOBMI-NEXT:    andl %edi, %esi
 ; NOBMI-NEXT:    notl %edi
 ; NOBMI-NEXT:    andl %edx, %edi
-; NOBMI-NEXT:    orl %edi, %eax
+; NOBMI-NEXT:    leal (%rsi,%rdi), %eax
 ; NOBMI-NEXT:    movl %edi, (%rcx)
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: masked_merge_no_transform2:
 ; BMI:       # %bb.0:
-; BMI-NEXT:    movl %esi, %eax
-; BMI-NEXT:    andl %edi, %eax
+; BMI-NEXT:    # kill: def $esi killed $esi def $rsi
+; BMI-NEXT:    andl %edi, %esi
 ; BMI-NEXT:    andnl %edx, %edi, %edx
-; BMI-NEXT:    orl %edx, %eax
+; BMI-NEXT:    leal (%rsi,%rdx), %eax
 ; BMI-NEXT:    movl %edx, (%rcx)
 ; BMI-NEXT:    retq
   %and0 = and i32 %a0, %a1
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
index f176a299c4e9be..93e2a14b8e509d 100644
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -54,7 +54,7 @@ define dso_local void @TestUnionLD1(fp128 %s, i64 %n) #0 {
 ; SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; SSE-NEXT:    movabsq $281474976710655, %rdx # imm = 0xFFFFFFFFFFFF
 ; SSE-NEXT:    andq %rdi, %rdx
-; SSE-NEXT:    orq %rax, %rdx
+; SSE-NEXT:    addq %rax, %rdx
 ; SSE-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
 ; SSE-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
@@ -68,7 +68,7 @@ define dso_local void @TestUnionLD1(fp128 %s, i64 %n) #0 {
 ; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
 ; AVX-NEXT:    movabsq $281474976710655, %rdx # imm = 0xFFFFFFFFFFFF
 ; AVX-NEXT:    andq %rdi, %rdx
-; AVX-NEXT:    orq %rax, %rdx
+; AVX-NEXT:    addq %rax, %rdx
 ; AVX-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
 ; AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
@@ -231,7 +231,7 @@ define fp128 @TestI128_3(fp128 %x, ptr nocapture readnone %ex) #0 {
 ; SSE-NEXT:    movabsq $-9223090561878065153, %rdx # imm = 0x8000FFFFFFFFFFFF
 ; SSE-NEXT:    andq {{[0-9]+}}(%rsp), %rdx
 ; SSE-NEXT:    movabsq $4611123068473966592, %rax # imm = 0x3FFE000000000000
-; SSE-NEXT:    orq %rdx, %rax
+; SSE-NEXT:    addq %rdx, %rax
 ; SSE-NEXT:  .LBB4_3: # %if.end
 ; SSE-NEXT:    movq %rcx, (%rsp)
 ; SSE-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
@@ -258,7 +258,7 @@ define fp128 @TestI128_3(fp128 %x, ptr nocapture readnone %ex) #0 {
 ; AVX-NEXT:    movabsq $-9223090561878065153, %rdx # imm = 0x8000FFFFFFFFFFFF
 ; AVX-NEXT:    andq {{[0-9]+}}(%rsp), %rdx
 ; AVX-NEXT:    movabsq $4611123068473966592, %rax # imm = 0x3FFE000000000000
-; AVX-NEXT:    orq %rdx, %rax
+; AVX-NEXT:    addq %rdx, %rax
 ; AVX-NEXT:  .LBB4_3: # %if.end
 ; AVX-NEXT:    movq %rcx, (%rsp)
 ; AVX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/fpenv.ll b/llvm/test/CodeGen/X86/fpenv.ll
index c79e19f07cda54..355324becfe30d 100644
--- a/llvm/test/CodeGen/X86/fpenv.ll
+++ b/llvm/test/CodeGen/X86/fpenv.ll
@@ -88,7 +88,7 @@ define void @func_03() nounwind {
 ; X86-NOSSE-NEXT:    fnstcw (%esp)
 ; X86-NOSSE-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X86-NOSSE-NEXT:    andl (%esp), %eax
-; X86-NOSSE-NEXT:    orl $2048, %eax # imm = 0x800
+; X86-NOSSE-NEXT:    addl $2048, %eax # imm = 0x800
 ; X86-NOSSE-NEXT:    movw %ax, (%esp)
 ; X86-NOSSE-NEXT:    fldcw (%esp)
 ; X86-NOSSE-NEXT:    popl %eax
@@ -100,13 +100,13 @@ define void @func_03() nounwind {
 ; X86-SSE-NEXT:    fnstcw (%esp)
 ; X86-SSE-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X86-SSE-NEXT:    andl (%esp), %eax
-; X86-SSE-NEXT:    orl $2048, %eax # imm = 0x800
+; X86-SSE-NEXT:    addl $2048, %eax # imm = 0x800
 ; X86-SSE-NEXT:    movw %ax, (%esp)
 ; X86-SSE-NEXT:    fldcw (%esp)
 ; X86-SSE-NEXT:    stmxcsr (%esp)
 ; X86-SSE-NEXT:    movl $-24577, %eax # imm = 0x9FFF
 ; X86-SSE-NEXT:    andl (%esp), %eax
-; X86-SSE-NEXT:    orl $16384, %eax # imm = 0x4000
+; X86-SSE-NEXT:    addl $16384, %eax # imm = 0x4000
 ; X86-SSE-NEXT:    movl %eax, (%esp)
 ; X86-SSE-NEXT:    ldmxcsr (%esp)
 ; X86-SSE-NEXT:    popl %eax
@@ -117,13 +117,13 @@ define void @func_03() nounwind {
 ; X64-NEXT:    fnstcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X64-NEXT:    andl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    orl $2048, %eax # imm = 0x800
+; X64-NEXT:    addl $2048, %eax # imm = 0x800
 ; X64-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    fldcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    stmxcsr -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $-24577, %eax # imm = 0x9FFF
 ; X64-NEXT:    andl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    orl $16384, %eax # imm = 0x4000
+; X64-NEXT:    addl $16384, %eax # imm = 0x4000
 ; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    ldmxcsr -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
@@ -138,7 +138,7 @@ define void @func_04() nounwind {
 ; X86-NOSSE-NEXT:    fnstcw (%esp)
 ; X86-NOSSE-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X86-NOSSE-NEXT:    andl (%esp), %eax
-; X86-NOSSE-NEXT:    orl $1024, %eax # imm = 0x400
+; X86-NOSSE-NEXT:    addl $1024, %eax # imm = 0x400
 ; X86-NOSSE-NEXT:    movw %ax, (%esp)
 ; X86-NOSSE-NEXT:    fldcw (%esp)
 ; X86-NOSSE-NEXT:    popl %eax
@@ -150,13 +150,13 @@ define void @func_04() nounwind {
 ; X86-SSE-NEXT:    fnstcw (%esp)
 ; X86-SSE-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X86-SSE-NEXT:    andl (%esp), %eax
-; X86-SSE-NEXT:    orl $1024, %eax # imm = 0x400
+; X86-SSE-NEXT:    addl $1024, %eax # imm = 0x400
 ; X86-SSE-NEXT:    movw %ax, (%esp)
 ; X86-SSE-NEXT:    fldcw (%esp)
 ; X86-SSE-NEXT:    stmxcsr (%esp)
 ; X86-SSE-NEXT:    movl $-24577, %eax # imm = 0x9FFF
 ; X86-SSE-NEXT:    andl (%esp), %eax
-; X86-SSE-NEXT:    orl $8192, %eax # imm = 0x2000
+; X86-SSE-NEXT:    addl $8192, %eax # imm = 0x2000
 ; X86-SSE-NEXT:    movl %eax, (%esp)
 ; X86-SSE-NEXT:    ldmxcsr (%esp)
 ; X86-SSE-NEXT:    popl %eax
@@ -167,13 +167,13 @@ define void @func_04() nounwind {
 ; X64-NEXT:    fnstcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $-3073, %eax # imm = 0xF3FF
 ; X64-NEXT:    andl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    orl $1024, %eax # imm = 0x400
+; X64-NEXT:    addl $1024, %eax # imm = 0x400
 ; X64-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    fldcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    stmxcsr -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $-24577, %eax # imm = 0x9FFF
 ; X64-NEXT:    andl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT:    orl $8192, %eax # imm = 0x2000
+; X64-NEXT:    addl $8192, %eax # imm = 0x2000
 ; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    ldmxcsr -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    retq
@@ -194,7 +194,7 @@ define void @func_05(i32 %x) nounwind {
 ; X86-NOSSE-NEXT:    fnstcw (%esp)
 ; X86-NOSSE-NEXT:    movl $-3073, %ecx # imm = 0xF3FF
 ; X86-NOSSE-NEXT:    andl (%esp), %ecx
-; X86-NOSSE-NEXT:    orl %eax, %ecx
+; X86-NOSSE-NEXT:    addl %eax, %ecx
 ; X86-NOSSE-NEXT:    movw %cx, (%esp)
 ; X86-NOSSE-NEXT:    fldcw (%esp)
 ; X86-NOSSE-NEXT:    popl %eax
@@ -212,7 +212,7 @@ define void @func_05(i32 %x) nounwind {
 ; X86-SSE-NEXT:    fnstcw (%esp)
 ; X86-SSE-NEXT:    movl $-3073, %ecx # imm = 0xF3FF
 ; X86-SSE-NEXT:    andl (%esp), %ecx
-; X86-SSE-NEXT:    orl %eax, %ecx
+; X86-SSE-NEXT:    addl %eax, %ecx
 ; X86-SSE-NEXT:    movw %cx, (%esp)
 ; X86-SSE-NEXT:    fldcw (%esp)
 ; X86-SSE-NEXT:    stmxcsr (%esp)
@@ -235,7 +235,7 @@ define void @func_05(i32 %x) nounwind {
 ; X64-NEXT:    fnstcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl $-3073, %ecx # imm = 0xF3FF
 ; X64-NEXT:    andl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    movw %cx, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    fldcw -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    stmxcsr -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll b/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
index eaa1293ed2f981..2369c2eb33cfb7 100644
--- a/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
+++ b/llvm/test/CodeGen/X86/fptosi-sat-vector-128.ll
@@ -77,7 +77,7 @@ define <4 x i8> @test_signed_v4i8_v4f32(<4 x float> %f) nounwind {
 ; CHECK-NEXT:    cvttss2si %xmm3, %ecx
 ; CHECK-NEXT:    movzbl %cl, %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    movaps %xmm0, %xmm3
 ; CHECK-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
 ; CHECK-NEXT:    movaps %xmm1, %xmm4
@@ -87,14 +87,14 @@ define <4 x i8> @test_signed_v4i8_v4f32(<4 x float> %f) nounwind {
 ; CHECK-NEXT:    cvttss2si %xmm3, %eax
 ; CHECK-NEXT:    movzbl %al, %eax
 ; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %ecx, %eax
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; CHECK-NEXT:    maxss %xmm0, %xmm1
 ; CHECK-NEXT:    minss %xmm1, %xmm2
-; CHECK-NEXT:    cvttss2si %xmm2, %ecx
-; CHECK-NEXT:    shll $24, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm0
+; CHECK-NEXT:    cvttss2si %xmm2, %edx
+; CHECK-NEXT:    shll $24, %edx
+; CHECK-NEXT:    addl %eax, %edx
+; CHECK-NEXT:    addl %ecx, %edx
+; CHECK-NEXT:    movd %edx, %xmm0
 ; CHECK-NEXT:    retq
   %x = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> %f)
   ret <4 x i8> %x
@@ -387,7 +387,7 @@ define <2 x i8> @test_signed_v2i8_v2f64(<2 x double> %f) nounwind {
 ; CHECK-NEXT:    minsd %xmm1, %xmm3
 ; CHECK-NEXT:    cvttsd2si %xmm3, %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    movd %ecx, %xmm0
 ; CHECK-NEXT:    retq
   %x = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> %f)
@@ -700,7 +700,7 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
 ; CHECK-NEXT:    movzbl %al, %r15d
-; CHECK-NEXT:    orl %r12d, %r15d
+; CHECK-NEXT:    addl %r12d, %r15d
 ; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
 ; CHECK-NEXT:    cvttss2si %xmm0, %eax
@@ -710,7 +710,7 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
-; CHECK-NEXT:    movzbl %al, %r12d
+; CHECK-NEXT:    movzbl %al, %r13d
 ; CHECK-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    psrld $16, %xmm0
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -721,9 +721,9 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
-; CHECK-NEXT:    movzbl %al, %r13d
-; CHECK-NEXT:    shll $8, %r13d
-; CHECK-NEXT:    orl %r12d, %r13d
+; CHECK-NEXT:    movzbl %al, %r12d
+; CHECK-NEXT:    shll $8, %r12d
+; CHECK-NEXT:    addl %r13d, %r12d
 ; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -734,9 +734,8 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
-; CHECK-NEXT:    movzbl %al, %r12d
-; CHECK-NEXT:    shll $16, %r12d
-; CHECK-NEXT:    orl %r13d, %r12d
+; CHECK-NEXT:    movzbl %al, %r13d
+; CHECK-NEXT:    shll $16, %r13d
 ; CHECK-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    psrlq $48, %xmm0
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -748,7 +747,8 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
 ; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %r12d, %eax
+; CHECK-NEXT:    addl %r13d, %eax
+; CHECK-NEXT:    addl %r12d, %eax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    pinsrw $2, %r15d, %xmm0
 ; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -774,7 +774,7 @@ define <8 x i8> @test_signed_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss %xmm0, %xmm0
 ; CHECK-NEXT:    cmovpl %r14d, %eax
 ; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    orl %r15d, %eax
+; CHECK-NEXT:    addl %r15d, %eax
 ; CHECK-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    pinsrw $3, %eax, %xmm0
 ; CHECK-NEXT:    addq $40, %rsp
diff --git a/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll b/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
index 4305886168abed..0f2da77c4cfce9 100644
--- a/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
+++ b/llvm/test/CodeGen/X86/fptoui-sat-vector-128.ll
@@ -68,7 +68,7 @@ define <4 x i8> @test_unsigned_v4i8_v4f32(<4 x float> %f) nounwind {
 ; CHECK-NEXT:    cvttss2si %xmm3, %ecx
 ; CHECK-NEXT:    movzbl %cl, %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    movaps %xmm0, %xmm3
 ; CHECK-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
 ; CHECK-NEXT:    xorps %xmm4, %xmm4
@@ -78,14 +78,14 @@ define <4 x i8> @test_unsigned_v4i8_v4f32(<4 x float> %f) nounwind {
 ; CHECK-NEXT:    cvttss2si %xmm3, %eax
 ; CHECK-NEXT:    movzbl %al, %eax
 ; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %ecx, %eax
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; CHECK-NEXT:    maxss %xmm0, %xmm1
 ; CHECK-NEXT:    minss %xmm1, %xmm2
-; CHECK-NEXT:    cvttss2si %xmm2, %ecx
-; CHECK-NEXT:    shll $24, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
-; CHECK-NEXT:    movd %ecx, %xmm0
+; CHECK-NEXT:    cvttss2si %xmm2, %edx
+; CHECK-NEXT:    shll $24, %edx
+; CHECK-NEXT:    addl %eax, %edx
+; CHECK-NEXT:    addl %ecx, %edx
+; CHECK-NEXT:    movd %edx, %xmm0
 ; CHECK-NEXT:    retq
   %x = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> %f)
   ret <4 x i8> %x
@@ -384,7 +384,7 @@ define <2 x i8> @test_unsigned_v2i8_v2f64(<2 x double> %f) nounwind {
 ; CHECK-NEXT:    minsd %xmm1, %xmm3
 ; CHECK-NEXT:    cvttsd2si %xmm3, %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    movd %ecx, %xmm0
 ; CHECK-NEXT:    retq
   %x = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> %f)
@@ -678,7 +678,7 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    movzbl %al, %r14d
-; CHECK-NEXT:    orl %r15d, %r14d
+; CHECK-NEXT:    addl %r15d, %r14d
 ; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
 ; CHECK-NEXT:    cvttss2si %xmm0, %eax
@@ -686,7 +686,7 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmovbl %ebx, %eax
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
-; CHECK-NEXT:    movzbl %al, %r15d
+; CHECK-NEXT:    movzbl %al, %r12d
 ; CHECK-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    psrld $16, %xmm0
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -695,9 +695,9 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmovbl %ebx, %eax
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
-; CHECK-NEXT:    movzbl %al, %r12d
-; CHECK-NEXT:    shll $8, %r12d
-; CHECK-NEXT:    orl %r15d, %r12d
+; CHECK-NEXT:    movzbl %al, %r15d
+; CHECK-NEXT:    shll $8, %r15d
+; CHECK-NEXT:    addl %r12d, %r15d
 ; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -706,9 +706,8 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    cmovbl %ebx, %eax
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
-; CHECK-NEXT:    movzbl %al, %r15d
-; CHECK-NEXT:    shll $16, %r15d
-; CHECK-NEXT:    orl %r12d, %r15d
+; CHECK-NEXT:    movzbl %al, %r12d
+; CHECK-NEXT:    shll $16, %r12d
 ; CHECK-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    psrlq $48, %xmm0
 ; CHECK-NEXT:    callq __extendhfsf2@PLT
@@ -718,7 +717,8 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %r15d, %eax
+; CHECK-NEXT:    addl %r12d, %eax
+; CHECK-NEXT:    addl %r15d, %eax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    pinsrw $2, %r14d, %xmm0
 ; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -740,7 +740,7 @@ define <8 x i8> @test_unsigned_v8i8_v8f16(<8 x half> %f) nounwind {
 ; CHECK-NEXT:    ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    cmoval %ebp, %eax
 ; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    orl %r14d, %eax
+; CHECK-NEXT:    addl %r14d, %eax
 ; CHECK-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; CHECK-NEXT:    pinsrw $3, %eax, %xmm0
 ; CHECK-NEXT:    addq $32, %rsp
diff --git a/llvm/test/CodeGen/X86/fshl.ll b/llvm/test/CodeGen/X86/fshl.ll
index 065b396e82ec31..e130c38d3bbcb2 100644
--- a/llvm/test/CodeGen/X86/fshl.ll
+++ b/llvm/test/CodeGen/X86/fshl.ll
@@ -21,7 +21,7 @@ define i8 @var_shift_i8(i8 %x, i8 %y, i8 %z) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    shll $8, %eax
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    andb $7, %cl
 ; X86-NEXT:    shll %cl, %eax
 ; X86-NEXT:    movb %ah, %al
@@ -32,7 +32,7 @@ define i8 @var_shift_i8(i8 %x, i8 %y, i8 %z) nounwind {
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shll $8, %edi
 ; X64-NEXT:    movzbl %sil, %eax
-; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    andb $7, %cl
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %eax
@@ -59,7 +59,7 @@ define i16 @var_shift_i16(i16 %x, i16 %y, i16 %z) nounwind {
 ; X86-SLOW-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SLOW-NEXT:    shll $16, %eax
-; X86-SLOW-NEXT:    orl %edx, %eax
+; X86-SLOW-NEXT:    addl %edx, %eax
 ; X86-SLOW-NEXT:    andb $15, %cl
 ; X86-SLOW-NEXT:    shll %cl, %eax
 ; X86-SLOW-NEXT:    shrl $16, %eax
@@ -81,7 +81,7 @@ define i16 @var_shift_i16(i16 %x, i16 %y, i16 %z) nounwind {
 ; X64-SLOW-NEXT:    movl %edx, %ecx
 ; X64-SLOW-NEXT:    shll $16, %edi
 ; X64-SLOW-NEXT:    movzwl %si, %eax
-; X64-SLOW-NEXT:    orl %edi, %eax
+; X64-SLOW-NEXT:    addl %edi, %eax
 ; X64-SLOW-NEXT:    andb $15, %cl
 ; X64-SLOW-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-SLOW-NEXT:    shll %cl, %eax
@@ -448,7 +448,7 @@ define i8 @const_shift_i8(i8 %x, i8 %y) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    shrb %cl
 ; X86-NEXT:    shlb $7, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_shift_i8:
@@ -478,7 +478,7 @@ define i16 @const_shift_i16(i16 %x, i16 %y) nounwind {
 ; X86-SLOW-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    shrl $9, %ecx
 ; X86-SLOW-NEXT:    shll $7, %eax
-; X86-SLOW-NEXT:    orl %ecx, %eax
+; X86-SLOW-NEXT:    addl %ecx, %eax
 ; X86-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-SLOW-NEXT:    retl
 ;
@@ -494,7 +494,7 @@ define i16 @const_shift_i16(i16 %x, i16 %y) nounwind {
 ; X64-SLOW-NEXT:    movzwl %si, %eax
 ; X64-SLOW-NEXT:    shll $7, %edi
 ; X64-SLOW-NEXT:    shrl $9, %eax
-; X64-SLOW-NEXT:    orl %edi, %eax
+; X64-SLOW-NEXT:    addl %edi, %eax
 ; X64-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-SLOW-NEXT:    retq
   %tmp = tail call i16 @llvm.fshl.i16(i16 %x, i16 %y, i16 7)
@@ -515,7 +515,7 @@ define i32 @const_shift_i32(i32 %x, i32 %y) nounwind {
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    shrl $25, %ecx
 ; X86-SLOW-NEXT:    shll $7, %eax
-; X86-SLOW-NEXT:    orl %ecx, %eax
+; X86-SLOW-NEXT:    addl %ecx, %eax
 ; X86-SLOW-NEXT:    retl
 ;
 ; X64-FAST-LABEL: const_shift_i32:
@@ -555,10 +555,10 @@ define i64 @const_shift_i64(i64 %x, i64 %y) nounwind {
 ; X86-SLOW-NEXT:    shrl $25, %esi
 ; X86-SLOW-NEXT:    movl %ecx, %eax
 ; X86-SLOW-NEXT:    shll $7, %eax
-; X86-SLOW-NEXT:    orl %esi, %eax
+; X86-SLOW-NEXT:    addl %esi, %eax
 ; X86-SLOW-NEXT:    shrl $25, %ecx
 ; X86-SLOW-NEXT:    shll $7, %edx
-; X86-SLOW-NEXT:    orl %ecx, %edx
+; X86-SLOW-NEXT:    addl %ecx, %edx
 ; X86-SLOW-NEXT:    popl %esi
 ; X86-SLOW-NEXT:    retl
 ;
diff --git a/llvm/test/CodeGen/X86/fshr.ll b/llvm/test/CodeGen/X86/fshr.ll
index 4340f8fd484aeb..e0bf7b447e6222 100644
--- a/llvm/test/CodeGen/X86/fshr.ll
+++ b/llvm/test/CodeGen/X86/fshr.ll
@@ -21,7 +21,7 @@ define i8 @var_shift_i8(i8 %x, i8 %y, i8 %z) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    shll $8, %eax
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    andb $7, %cl
 ; X86-NEXT:    shrl %cl, %eax
 ; X86-NEXT:    # kill: def $al killed $al killed $eax
@@ -32,7 +32,7 @@ define i8 @var_shift_i8(i8 %x, i8 %y, i8 %z) nounwind {
 ; X64-NEXT:    movl %edx, %ecx
 ; X64-NEXT:    shll $8, %edi
 ; X64-NEXT:    movzbl %sil, %eax
-; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    andb $7, %cl
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shrl %cl, %eax
@@ -58,7 +58,7 @@ define i16 @var_shift_i16(i16 %x, i16 %y, i16 %z) nounwind {
 ; X86-SLOW-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SLOW-NEXT:    shll $16, %eax
-; X86-SLOW-NEXT:    orl %edx, %eax
+; X86-SLOW-NEXT:    addl %edx, %eax
 ; X86-SLOW-NEXT:    andb $15, %cl
 ; X86-SLOW-NEXT:    shrl %cl, %eax
 ; X86-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
@@ -79,7 +79,7 @@ define i16 @var_shift_i16(i16 %x, i16 %y, i16 %z) nounwind {
 ; X64-SLOW-NEXT:    movl %edx, %ecx
 ; X64-SLOW-NEXT:    shll $16, %edi
 ; X64-SLOW-NEXT:    movzwl %si, %eax
-; X64-SLOW-NEXT:    orl %edi, %eax
+; X64-SLOW-NEXT:    addl %edi, %eax
 ; X64-SLOW-NEXT:    andb $15, %cl
 ; X64-SLOW-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-SLOW-NEXT:    shrl %cl, %eax
@@ -439,7 +439,7 @@ define i8 @const_shift_i8(i8 %x, i8 %y) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    shrb $7, %cl
 ; X86-NEXT:    addb %al, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_shift_i8:
@@ -447,7 +447,7 @@ define i8 @const_shift_i8(i8 %x, i8 %y) nounwind {
 ; X64-NEXT:    # kill: def $edi killed $edi def $rdi
 ; X64-NEXT:    shrb $7, %sil
 ; X64-NEXT:    leal (%rdi,%rdi), %eax
-; X64-NEXT:    orb %sil, %al
+; X64-NEXT:    addb %sil, %al
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
   %tmp = tail call i8 @llvm.fshr.i8(i8 %x, i8 %y, i8 7)
@@ -468,7 +468,7 @@ define i16 @const_shift_i16(i16 %x, i16 %y) nounwind {
 ; X86-SLOW-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    shrl $7, %ecx
 ; X86-SLOW-NEXT:    shll $9, %eax
-; X86-SLOW-NEXT:    orl %ecx, %eax
+; X86-SLOW-NEXT:    addl %ecx, %eax
 ; X86-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-SLOW-NEXT:    retl
 ;
@@ -484,7 +484,7 @@ define i16 @const_shift_i16(i16 %x, i16 %y) nounwind {
 ; X64-SLOW-NEXT:    movzwl %si, %eax
 ; X64-SLOW-NEXT:    shll $9, %edi
 ; X64-SLOW-NEXT:    shrl $7, %eax
-; X64-SLOW-NEXT:    orl %edi, %eax
+; X64-SLOW-NEXT:    addl %edi, %eax
 ; X64-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-SLOW-NEXT:    retq
   %tmp = tail call i16 @llvm.fshr.i16(i16 %x, i16 %y, i16 7)
@@ -505,7 +505,7 @@ define i32 @const_shift_i32(i32 %x, i32 %y) nounwind {
 ; X86-SLOW-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLOW-NEXT:    shrl $7, %ecx
 ; X86-SLOW-NEXT:    shll $25, %eax
-; X86-SLOW-NEXT:    orl %ecx, %eax
+; X86-SLOW-NEXT:    addl %ecx, %eax
 ; X86-SLOW-NEXT:    retl
 ;
 ; X64-FAST-LABEL: const_shift_i32:
@@ -545,10 +545,10 @@ define i64 @const_shift_i64(i64 %x, i64 %y) nounwind {
 ; X86-SLOW-NEXT:    shrl $7, %ecx
 ; X86-SLOW-NEXT:    movl %esi, %eax
 ; X86-SLOW-NEXT:    shll $25, %eax
-; X86-SLOW-NEXT:    orl %ecx, %eax
+; X86-SLOW-NEXT:    addl %ecx, %eax
 ; X86-SLOW-NEXT:    shrl $7, %esi
 ; X86-SLOW-NEXT:    shll $25, %edx
-; X86-SLOW-NEXT:    orl %esi, %edx
+; X86-SLOW-NEXT:    addl %esi, %edx
 ; X86-SLOW-NEXT:    popl %esi
 ; X86-SLOW-NEXT:    retl
 ;
diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll
index c6f0662cadd6bf..3f96c0e8a4cb63 100644
--- a/llvm/test/CodeGen/X86/funnel-shift.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift.ll
@@ -988,7 +988,7 @@ define void @PR45265(i32 %0, ptr nocapture readonly %1) nounwind {
 ; X86-SSE2-NEXT:    sarl $31, %eax
 ; X86-SSE2-NEXT:    movzbl 10(%ecx,%esi,4), %ecx
 ; X86-SSE2-NEXT:    shll $16, %ecx
-; X86-SSE2-NEXT:    orl %edx, %ecx
+; X86-SSE2-NEXT:    addl %edx, %ecx
 ; X86-SSE2-NEXT:    shll $8, %ecx
 ; X86-SSE2-NEXT:    movl %ecx, %edx
 ; X86-SSE2-NEXT:    sarl $8, %edx
@@ -1013,7 +1013,7 @@ define void @PR45265(i32 %0, ptr nocapture readonly %1) nounwind {
 ; X64-AVX2-NEXT:    movsbq 10(%rsi,%rcx,4), %rdx
 ; X64-AVX2-NEXT:    shlq $16, %rdx
 ; X64-AVX2-NEXT:    movzwl 8(%rsi,%rcx,4), %edi
-; X64-AVX2-NEXT:    orq %rdx, %rdi
+; X64-AVX2-NEXT:    addq %rdx, %rdi
 ; X64-AVX2-NEXT:    movq (%rsi,%rcx,4), %rcx
 ; X64-AVX2-NEXT:    shrdq $40, %rdi, %rcx
 ; X64-AVX2-NEXT:    cmpq %rax, %rcx
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 9f01d07e6a6705..98c7bc27961668 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -1229,7 +1229,7 @@ define half @fcopysign(half %x, half %y) {
 ; CHECK-LIBCALL-NEXT:    andl $-32768, %eax # imm = 0x8000
 ; CHECK-LIBCALL-NEXT:    pextrw $0, %xmm0, %ecx
 ; CHECK-LIBCALL-NEXT:    andl $32767, %ecx # imm = 0x7FFF
-; CHECK-LIBCALL-NEXT:    orl %eax, %ecx
+; CHECK-LIBCALL-NEXT:    addl %eax, %ecx
 ; CHECK-LIBCALL-NEXT:    pinsrw $0, %ecx, %xmm0
 ; CHECK-LIBCALL-NEXT:    retq
 ;
@@ -1239,7 +1239,7 @@ define half @fcopysign(half %x, half %y) {
 ; BWON-F16C-NEXT:    andl $-32768, %eax # imm = 0x8000
 ; BWON-F16C-NEXT:    vpextrw $0, %xmm0, %ecx
 ; BWON-F16C-NEXT:    andl $32767, %ecx # imm = 0x7FFF
-; BWON-F16C-NEXT:    orl %eax, %ecx
+; BWON-F16C-NEXT:    addl %eax, %ecx
 ; BWON-F16C-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    retq
 ;
@@ -1249,7 +1249,7 @@ define half @fcopysign(half %x, half %y) {
 ; CHECK-I686-NEXT:    andl {{[0-9]+}}(%esp), %eax
 ; CHECK-I686-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
 ; CHECK-I686-NEXT:    andl $32767, %ecx # imm = 0x7FFF
-; CHECK-I686-NEXT:    orl %eax, %ecx
+; CHECK-I686-NEXT:    addl %eax, %ecx
 ; CHECK-I686-NEXT:    pinsrw $0, %ecx, %xmm0
 ; CHECK-I686-NEXT:    retl
   %a = call half @llvm.copysign.f16(half %x, half %y)
diff --git a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index 7fb07c6b3163e7..9997e66950f648 100644
--- a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -9,7 +9,7 @@ define void @i24_or(ptr %a) {
 ; X86-NEXT:    movzwl (%eax), %ecx
 ; X86-NEXT:    movzbl 2(%eax), %edx
 ; X86-NEXT:    shll $16, %edx
-; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    orl $384, %edx # imm = 0x180
 ; X86-NEXT:    movw %dx, (%eax)
 ; X86-NEXT:    retl
@@ -19,7 +19,7 @@ define void @i24_or(ptr %a) {
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    shll $16, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    orl $384, %ecx # imm = 0x180
 ; X64-NEXT:    movw %cx, (%rdi)
 ; X64-NEXT:    retq
@@ -36,7 +36,7 @@ define void @i24_and_or(ptr %a) {
 ; X86-NEXT:    movzwl (%eax), %ecx
 ; X86-NEXT:    movzbl 2(%eax), %edx
 ; X86-NEXT:    shll $16, %edx
-; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    orl $384, %edx # imm = 0x180
 ; X86-NEXT:    andl $-128, %edx
 ; X86-NEXT:    movw %dx, (%eax)
@@ -47,7 +47,7 @@ define void @i24_and_or(ptr %a) {
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    shll $16, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    orl $384, %ecx # imm = 0x180
 ; X64-NEXT:    andl $-128, %ecx
 ; X64-NEXT:    movw %cx, (%rdi)
@@ -70,10 +70,10 @@ define void @i24_insert_bit(ptr %a, i1 zeroext %bit) {
 ; X86-NEXT:    movzwl (%eax), %edx
 ; X86-NEXT:    movzbl 2(%eax), %esi
 ; X86-NEXT:    shll $16, %esi
-; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    addl %edx, %esi
 ; X86-NEXT:    shll $13, %ecx
 ; X86-NEXT:    andl $16769023, %esi # imm = 0xFFDFFF
-; X86-NEXT:    orl %ecx, %esi
+; X86-NEXT:    addl %ecx, %esi
 ; X86-NEXT:    movw %si, (%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
@@ -84,10 +84,10 @@ define void @i24_insert_bit(ptr %a, i1 zeroext %bit) {
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    movzbl 2(%rdi), %ecx
 ; X64-NEXT:    shll $16, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    shll $13, %esi
 ; X64-NEXT:    andl $16769023, %ecx # imm = 0xFFDFFF
-; X64-NEXT:    orl %esi, %ecx
+; X64-NEXT:    addl %esi, %ecx
 ; X64-NEXT:    movw %cx, (%rdi)
 ; X64-NEXT:    retq
   %extbit = zext i1 %bit to i24
@@ -131,10 +131,10 @@ define void @i56_and_or(ptr %a) {
 ; X64-NEXT:    movzwl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 6(%rdi), %ecx
 ; X64-NEXT:    shll $16, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    shlq $32, %rcx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    addq %rcx, %rax
 ; X64-NEXT:    orq $384, %rax # imm = 0x180
 ; X64-NEXT:    movabsq $72057594037927808, %rcx # imm = 0xFFFFFFFFFFFF80
 ; X64-NEXT:    andq %rax, %rcx
@@ -157,7 +157,7 @@ define void @i56_insert_bit(ptr %a, i1 zeroext %bit) {
 ; X86-NEXT:    shll $13, %ecx
 ; X86-NEXT:    movl $-8193, %edx # imm = 0xDFFF
 ; X86-NEXT:    andl (%eax), %edx
-; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    movl %edx, (%eax)
 ; X86-NEXT:    retl
 ;
@@ -166,13 +166,13 @@ define void @i56_insert_bit(ptr %a, i1 zeroext %bit) {
 ; X64-NEXT:    movzwl 4(%rdi), %eax
 ; X64-NEXT:    movzbl 6(%rdi), %ecx
 ; X64-NEXT:    shll $16, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    shlq $32, %rcx
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    addq %rcx, %rax
 ; X64-NEXT:    shll $13, %esi
 ; X64-NEXT:    andq $-8193, %rax # imm = 0xDFFF
-; X64-NEXT:    orl %eax, %esi
+; X64-NEXT:    addl %eax, %esi
 ; X64-NEXT:    shrq $32, %rax
 ; X64-NEXT:    movw %ax, 4(%rdi)
 ; X64-NEXT:    movl %esi, (%rdi)
diff --git a/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll b/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
index 0250b1b4a7f861..4b3ccbaacc7407 100644
--- a/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
+++ b/llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll
@@ -45,7 +45,7 @@ define i32 @decode_sb(ptr %t, i32 %bl, i32 %_msprop1966, i32 %sub.i, i64 %idxpro
 ; CHECK-NEXT:    movl %r14d, %r10d
 ; CHECK-NEXT:    andl $1, %r10d
 ; CHECK-NEXT:    movabsq $17592186044416, %rax # imm = 0x100000000000
-; CHECK-NEXT:    orq %r10, %rax
+; CHECK-NEXT:    addq %r10, %rax
 ; CHECK-NEXT:    movl %esi, %r10d
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; CHECK-NEXT:    shrl %cl, %r10d
diff --git a/llvm/test/CodeGen/X86/insert.ll b/llvm/test/CodeGen/X86/insert.ll
index 381de2ecaa1646..672ac8c288ee2c 100644
--- a/llvm/test/CodeGen/X86/insert.ll
+++ b/llvm/test/CodeGen/X86/insert.ll
@@ -32,7 +32,7 @@ define i64 @sub16(i64 noundef %res, ptr %byte) {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movzwl (%eax), %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sub16:
@@ -76,7 +76,7 @@ define i32 @sub16_32(i32 noundef %res, ptr %byte) {
 ; X86-NEXT:    shll $16, %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movzwl (%eax), %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sub16_32:
diff --git a/llvm/test/CodeGen/X86/is_fpclass-fp80.ll b/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
index ec2323ac2250c7..0abf44262f99b4 100644
--- a/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass-fp80.ll
@@ -249,7 +249,7 @@ define i1 @is_inf_f80(x86_fp80 %x) {
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    notl %eax
 ; CHECK-32-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
-; CHECK-32-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
 ; CHECK-32-NEXT:    andl $32767, %eax # imm = 0x7FFF
 ; CHECK-32-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    orl %ecx, %eax
@@ -276,7 +276,7 @@ define i1 @is_posinf_f80(x86_fp80 %x) {
 ; CHECK-32:       # %bb.0: # %entry
 ; CHECK-32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
-; CHECK-32-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
 ; CHECK-32-NEXT:    xorl $32767, %eax # imm = 0x7FFF
 ; CHECK-32-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    orl %ecx, %eax
@@ -303,7 +303,7 @@ define i1 @is_neginf_f80(x86_fp80 %x) {
 ; CHECK-32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    xorl $65535, %eax # imm = 0xFFFF
 ; CHECK-32-NEXT:    movl $-2147483648, %ecx # imm = 0x80000000
-; CHECK-32-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
 ; CHECK-32-NEXT:    orl {{[0-9]+}}(%esp), %eax
 ; CHECK-32-NEXT:    orl %ecx, %eax
 ; CHECK-32-NEXT:    sete %al
diff --git a/llvm/test/CodeGen/X86/is_fpclass.ll b/llvm/test/CodeGen/X86/is_fpclass.ll
index 2046d790cc57e4..e8b869652f9da9 100644
--- a/llvm/test/CodeGen/X86/is_fpclass.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass.ll
@@ -1481,7 +1481,7 @@ define <4 x i1> @isnan_v4f(<4 x float> %x) {
 ; CHECK-32-NEXT:    sahf
 ; CHECK-32-NEXT:    setp %dl
 ; CHECK-32-NEXT:    shlb $3, %dl
-; CHECK-32-NEXT:    orb %dh, %dl
+; CHECK-32-NEXT:    addb %dh, %dl
 ; CHECK-32-NEXT:    fucomp %st(0)
 ; CHECK-32-NEXT:    fnstsw %ax
 ; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
@@ -1493,8 +1493,8 @@ define <4 x i1> @isnan_v4f(<4 x float> %x) {
 ; CHECK-32-NEXT:    sahf
 ; CHECK-32-NEXT:    setp %al
 ; CHECK-32-NEXT:    addb %al, %al
-; CHECK-32-NEXT:    orb %dh, %al
-; CHECK-32-NEXT:    orb %dl, %al
+; CHECK-32-NEXT:    addb %dh, %al
+; CHECK-32-NEXT:    addb %dl, %al
 ; CHECK-32-NEXT:    movb %al, (%ecx)
 ; CHECK-32-NEXT:    movl %ecx, %eax
 ; CHECK-32-NEXT:    retl $4
@@ -1526,7 +1526,7 @@ define <4 x i1> @isnan_v4f_strictfp(<4 x float> %x) strictfp {
 ; CHECK-32-NEXT:    cmpl $2139095041, %esi # imm = 0x7F800001
 ; CHECK-32-NEXT:    setge %dl
 ; CHECK-32-NEXT:    shlb $3, %dl
-; CHECK-32-NEXT:    orb %dh, %dl
+; CHECK-32-NEXT:    addb %dh, %dl
 ; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; CHECK-32-NEXT:    andl %ecx, %esi
 ; CHECK-32-NEXT:    cmpl $2139095041, %esi # imm = 0x7F800001
@@ -1535,8 +1535,8 @@ define <4 x i1> @isnan_v4f_strictfp(<4 x float> %x) strictfp {
 ; CHECK-32-NEXT:    cmpl $2139095041, %ecx # imm = 0x7F800001
 ; CHECK-32-NEXT:    setge %cl
 ; CHECK-32-NEXT:    addb %cl, %cl
-; CHECK-32-NEXT:    orb %dh, %cl
-; CHECK-32-NEXT:    orb %dl, %cl
+; CHECK-32-NEXT:    addb %dh, %cl
+; CHECK-32-NEXT:    addb %dl, %cl
 ; CHECK-32-NEXT:    movb %cl, (%eax)
 ; CHECK-32-NEXT:    popl %esi
 ; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
diff --git a/llvm/test/CodeGen/X86/kshift.ll b/llvm/test/CodeGen/X86/kshift.ll
index f4efacc1946cff..a2d7986d091a0b 100644
--- a/llvm/test/CodeGen/X86/kshift.ll
+++ b/llvm/test/CodeGen/X86/kshift.ll
@@ -84,7 +84,7 @@ define i32 @kshiftl_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %ecx, %eax
+; KNL-NEXT:    addl %ecx, %eax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
@@ -146,15 +146,15 @@ define i64 @kshiftl_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
 ; KNL-NEXT:    vptestmd %zmm4, %zmm4, %k0 {%k4}
 ; KNL-NEXT:    kmovw %k0, %ecx
 ; KNL-NEXT:    shll $16, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    addl %eax, %ecx
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
 ; KNL-NEXT:    kmovw %k0, %edx
 ; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0 {%k1}
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %edx, %eax
+; KNL-NEXT:    addl %edx, %eax
 ; KNL-NEXT:    shlq $32, %rax
-; KNL-NEXT:    orq %rcx, %rax
+; KNL-NEXT:    addq %rcx, %rax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
@@ -381,7 +381,7 @@ define i32 @kshiftr_v32i1_1(<32 x i16> %x, <32 x i16> %y) {
 ; KNL-NEXT:    shll $16, %ecx
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
 ; KNL-NEXT:    kmovw %k0, %eax
-; KNL-NEXT:    orl %ecx, %eax
+; KNL-NEXT:    addl %ecx, %eax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
@@ -443,15 +443,15 @@ define i64 @kshiftr_v64i1_1(<64 x i8> %x, <64 x i8> %y) {
 ; KNL-NEXT:    shll $16, %eax
 ; KNL-NEXT:    vptestmd %zmm3, %zmm3, %k0 {%k4}
 ; KNL-NEXT:    kmovw %k0, %ecx
-; KNL-NEXT:    orl %eax, %ecx
+; KNL-NEXT:    addl %eax, %ecx
 ; KNL-NEXT:    shlq $32, %rcx
 ; KNL-NEXT:    vptestmd %zmm0, %zmm0, %k0 {%k2}
 ; KNL-NEXT:    kmovw %k0, %edx
 ; KNL-NEXT:    vptestmd %zmm4, %zmm4, %k0 {%k1}
 ; KNL-NEXT:    kmovw %k0, %eax
 ; KNL-NEXT:    shll $16, %eax
-; KNL-NEXT:    orl %edx, %eax
-; KNL-NEXT:    orq %rcx, %rax
+; KNL-NEXT:    addl %edx, %eax
+; KNL-NEXT:    addq %rcx, %rax
 ; KNL-NEXT:    vzeroupper
 ; KNL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/limited-prec.ll b/llvm/test/CodeGen/X86/limited-prec.ll
index b2b722841eff3f..68a4950f852293 100644
--- a/llvm/test/CodeGen/X86/limited-prec.ll
+++ b/llvm/test/CodeGen/X86/limited-prec.ll
@@ -316,7 +316,7 @@ define float @f4(float %x) nounwind noinline {
 ; precision6-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision6-NEXT:    movl %eax, %ecx
 ; precision6-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision6-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision6-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision6-NEXT:    movl %ecx, (%esp)
 ; precision6-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision6-NEXT:    shrl $23, %eax
@@ -340,7 +340,7 @@ define float @f4(float %x) nounwind noinline {
 ; precision12-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision12-NEXT:    movl %eax, %ecx
 ; precision12-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision12-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision12-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision12-NEXT:    movl %ecx, (%esp)
 ; precision12-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision12-NEXT:    shrl $23, %eax
@@ -368,7 +368,7 @@ define float @f4(float %x) nounwind noinline {
 ; precision18-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision18-NEXT:    movl %eax, %ecx
 ; precision18-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision18-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision18-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision18-NEXT:    movl %ecx, (%esp)
 ; precision18-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision18-NEXT:    shrl $23, %eax
@@ -408,7 +408,7 @@ define float @f5(float %x) nounwind noinline {
 ; precision6-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision6-NEXT:    movl %eax, %ecx
 ; precision6-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision6-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision6-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision6-NEXT:    movl %ecx, (%esp)
 ; precision6-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision6-NEXT:    shrl $23, %eax
@@ -430,7 +430,7 @@ define float @f5(float %x) nounwind noinline {
 ; precision12-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision12-NEXT:    movl %eax, %ecx
 ; precision12-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision12-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision12-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision12-NEXT:    movl %ecx, (%esp)
 ; precision12-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision12-NEXT:    shrl $23, %eax
@@ -456,7 +456,7 @@ define float @f5(float %x) nounwind noinline {
 ; precision18-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision18-NEXT:    movl %eax, %ecx
 ; precision18-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision18-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision18-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision18-NEXT:    movl %ecx, (%esp)
 ; precision18-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision18-NEXT:    shrl $23, %eax
@@ -494,7 +494,7 @@ define float @f6(float %x) nounwind noinline {
 ; precision6-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision6-NEXT:    movl %eax, %ecx
 ; precision6-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision6-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision6-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision6-NEXT:    movl %ecx, (%esp)
 ; precision6-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision6-NEXT:    shrl $23, %eax
@@ -518,7 +518,7 @@ define float @f6(float %x) nounwind noinline {
 ; precision12-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision12-NEXT:    movl %eax, %ecx
 ; precision12-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision12-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision12-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision12-NEXT:    movl %ecx, (%esp)
 ; precision12-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision12-NEXT:    shrl $23, %eax
@@ -544,7 +544,7 @@ define float @f6(float %x) nounwind noinline {
 ; precision18-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; precision18-NEXT:    movl %eax, %ecx
 ; precision18-NEXT:    andl $8388607, %ecx # imm = 0x7FFFFF
-; precision18-NEXT:    orl $1065353216, %ecx # imm = 0x3F800000
+; precision18-NEXT:    addl $1065353216, %ecx # imm = 0x3F800000
 ; precision18-NEXT:    movl %ecx, (%esp)
 ; precision18-NEXT:    andl $2139095040, %eax # imm = 0x7F800000
 ; precision18-NEXT:    shrl $23, %eax
diff --git a/llvm/test/CodeGen/X86/llvm.frexp.ll b/llvm/test/CodeGen/X86/llvm.frexp.ll
index d6038cebd45822..b3ec7b391cf76a 100644
--- a/llvm/test/CodeGen/X86/llvm.frexp.ll
+++ b/llvm/test/CodeGen/X86/llvm.frexp.ll
@@ -27,7 +27,7 @@ define { half, i32 } @test_frexp_f16_i32(half %a) {
 ; X64-NEXT:    cmovael %eax, %edi
 ; X64-NEXT:    addl $-14, %edi
 ; X64-NEXT:    andl $-31745, %ecx # imm = 0x83FF
-; X64-NEXT:    orl $14336, %ecx # imm = 0x3800
+; X64-NEXT:    addl $14336, %ecx # imm = 0x3800
 ; X64-NEXT:    addl $-31744, %esi # imm = 0x8400
 ; X64-NEXT:    movzwl %si, %esi
 ; X64-NEXT:    xorl %eax, %eax
@@ -70,7 +70,7 @@ define { half, i32 } @test_frexp_f16_i32(half %a) {
 ; WIN32-NEXT:    addl $-25, %eax
 ; WIN32-NEXT:  LBB0_5:
 ; WIN32-NEXT:    andl $-2139095041, %edx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %edx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %edx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-126, %eax
 ; WIN32-NEXT:    addl $-2139095040, %ecx # imm = 0x80800000
@@ -114,7 +114,7 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) {
 ; X64-NEXT:    cmpl $1024, %edx # imm = 0x400
 ; X64-NEXT:    cmovael %ecx, %eax
 ; X64-NEXT:    andl $-31745, %eax # imm = 0x83FF
-; X64-NEXT:    orl $14336, %eax # imm = 0x3800
+; X64-NEXT:    addl $14336, %eax # imm = 0x3800
 ; X64-NEXT:    addl $-31744, %edx # imm = 0x8400
 ; X64-NEXT:    movzwl %dx, %edx
 ; X64-NEXT:    cmpl $33792, %edx # imm = 0x8400
@@ -143,7 +143,7 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; WIN32-NEXT:  LBB1_2:
 ; WIN32-NEXT:    andl $-2139095041, %ecx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %ecx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %ecx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-2139095040, %eax # imm = 0x80800000
 ; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
@@ -294,7 +294,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) {
 ; WIN32-NEXT:    addl $-25, %edx
 ; WIN32-NEXT:  LBB3_5:
 ; WIN32-NEXT:    andl $-2139095041, %eax # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %eax # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %eax # imm = 0x3F000000
 ; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-126, %edx
 ; WIN32-NEXT:    addl $-2139095040, %ecx # imm = 0x80800000
@@ -345,7 +345,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; WIN32-NEXT:  LBB4_2:
 ; WIN32-NEXT:    andl $-2139095041, %ecx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %ecx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %ecx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-2139095040, %eax # imm = 0x80800000
 ; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
@@ -586,16 +586,16 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) {
 ; WIN32-NEXT:    addl $-25, %esi
 ; WIN32-NEXT:  LBB6_20:
 ; WIN32-NEXT:    andl $-2139095041, %eax # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %eax # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %eax # imm = 0x3F000000
 ; WIN32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    andl $-2139095041, %ebx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %ebx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %ebx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    andl $-2139095041, %ecx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %ecx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %ecx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    andl $-2139095041, %edi # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %edi # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %edi # imm = 0x3F000000
 ; WIN32-NEXT:    movl %edi, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-126, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; WIN32-NEXT:    addl $-126, (%esp) # 4-byte Folded Spill
@@ -753,7 +753,7 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; WIN32-NEXT:  LBB7_2:
 ; WIN32-NEXT:    andl $-2139095041, %ecx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %ecx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %ecx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; WIN32-NEXT:    movl %edx, %ecx
@@ -764,7 +764,7 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; WIN32-NEXT:  LBB7_4:
 ; WIN32-NEXT:    andl $-2139095041, %edx # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %edx # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %edx # imm = 0x3F000000
 ; WIN32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; WIN32-NEXT:    movl %esi, %edx
@@ -775,7 +775,7 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; WIN32-NEXT:  LBB7_6:
 ; WIN32-NEXT:    andl $-2139095041, %esi # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %esi # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %esi # imm = 0x3F000000
 ; WIN32-NEXT:    movl %esi, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    movl (%esp), %edi
 ; WIN32-NEXT:    movl %edi, %esi
@@ -786,7 +786,7 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
 ; WIN32-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; WIN32-NEXT:  LBB7_8:
 ; WIN32-NEXT:    andl $-2139095041, %edi # imm = 0x807FFFFF
-; WIN32-NEXT:    orl $1056964608, %edi # imm = 0x3F000000
+; WIN32-NEXT:    addl $1056964608, %edi # imm = 0x3F000000
 ; WIN32-NEXT:    movl %edi, {{[0-9]+}}(%esp)
 ; WIN32-NEXT:    addl $-2139095040, %eax # imm = 0x80800000
 ; WIN32-NEXT:    flds {{[0-9]+}}(%esp)
diff --git a/llvm/test/CodeGen/X86/load-chain.ll b/llvm/test/CodeGen/X86/load-chain.ll
index fe6f91932b9856..2b9f2e37f016bb 100644
--- a/llvm/test/CodeGen/X86/load-chain.ll
+++ b/llvm/test/CodeGen/X86/load-chain.ll
@@ -14,7 +14,7 @@ define void @translate(ptr %ptr) nounwind {
 ; CHECK-NEXT:    movl $-32707, %ebp # imm = 0x803D
 ; CHECK-NEXT:    andl (%rdi), %ebp
 ; CHECK-NEXT:    callq maybe_mutate@PLT
-; CHECK-NEXT:    orl $514, %ebp # imm = 0x202
+; CHECK-NEXT:    addl $514, %ebp # imm = 0x202
 ; CHECK-NEXT:    movw %bp, (%rbx)
 ; CHECK-NEXT:    addq $8, %rsp
 ; CHECK-NEXT:    popq %rbx
diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll
index b5f3e789918813..f84e205f2a75c6 100644
--- a/llvm/test/CodeGen/X86/load-combine.ll
+++ b/llvm/test/CodeGen/X86/load-combine.ll
@@ -359,12 +359,12 @@ define i32 @load_i32_by_i8_bswap_uses(ptr %arg) {
 ; CHECK-NEXT:    movzbl 1(%eax), %edx
 ; CHECK-NEXT:    movl %edx, %esi
 ; CHECK-NEXT:    shll $16, %esi
-; CHECK-NEXT:    orl %ecx, %esi
+; CHECK-NEXT:    addl %ecx, %esi
 ; CHECK-NEXT:    movzbl 2(%eax), %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %esi, %ecx
 ; CHECK-NEXT:    movzbl 3(%eax), %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
+; CHECK-NEXT:    addl %esi, %eax
 ; CHECK-NEXT:    orl %edx, %eax
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 4
@@ -377,12 +377,12 @@ define i32 @load_i32_by_i8_bswap_uses(ptr %arg) {
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    movl %ecx, %edx
 ; CHECK64-NEXT:    shll $16, %edx
-; CHECK64-NEXT:    orl %eax, %edx
+; CHECK64-NEXT:    addl %eax, %edx
 ; CHECK64-NEXT:    movzbl 2(%rdi), %esi
 ; CHECK64-NEXT:    shll $8, %esi
-; CHECK64-NEXT:    orl %edx, %esi
 ; CHECK64-NEXT:    movzbl 3(%rdi), %eax
-; CHECK64-NEXT:    orl %esi, %eax
+; CHECK64-NEXT:    addl %esi, %eax
+; CHECK64-NEXT:    addl %edx, %eax
 ; CHECK64-NEXT:    orl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp1 = load i8, ptr %arg, align 1
@@ -419,12 +419,12 @@ define i32 @load_i32_by_i8_bswap_volatile(ptr %arg) {
 ; CHECK-NEXT:    shll $24, %ecx
 ; CHECK-NEXT:    movzbl 1(%eax), %edx
 ; CHECK-NEXT:    shll $16, %edx
-; CHECK-NEXT:    orl %ecx, %edx
+; CHECK-NEXT:    addl %ecx, %edx
 ; CHECK-NEXT:    movzbl 2(%eax), %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %edx, %ecx
 ; CHECK-NEXT:    movzbl 3(%eax), %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
+; CHECK-NEXT:    addl %edx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_volatile:
@@ -433,12 +433,12 @@ define i32 @load_i32_by_i8_bswap_volatile(ptr %arg) {
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    shll $16, %ecx
-; CHECK64-NEXT:    orl %eax, %ecx
+; CHECK64-NEXT:    addl %eax, %ecx
 ; CHECK64-NEXT:    movzbl 2(%rdi), %edx
 ; CHECK64-NEXT:    shll $8, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
 ; CHECK64-NEXT:    movzbl 3(%rdi), %eax
-; CHECK64-NEXT:    orl %edx, %eax
+; CHECK64-NEXT:    addl %edx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp1 = load volatile i8, ptr %arg, align 1
   %tmp2 = zext i8 %tmp1 to i32
@@ -472,19 +472,19 @@ define i32 @load_i32_by_i8_bswap_store_in_between(ptr %arg, ptr %arg1) {
 ; CHECK-NEXT:    pushl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    .cfi_offset %esi, -8
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movzbl (%eax), %edx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movzbl (%ecx), %edx
 ; CHECK-NEXT:    shll $24, %edx
-; CHECK-NEXT:    movzbl 1(%eax), %esi
-; CHECK-NEXT:    movl $0, (%ecx)
+; CHECK-NEXT:    movzbl 1(%ecx), %esi
+; CHECK-NEXT:    movl $0, (%eax)
 ; CHECK-NEXT:    shll $16, %esi
-; CHECK-NEXT:    orl %edx, %esi
-; CHECK-NEXT:    movzbl 2(%eax), %ecx
-; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %esi, %ecx
-; CHECK-NEXT:    movzbl 3(%eax), %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %edx, %esi
+; CHECK-NEXT:    movzbl 2(%ecx), %edx
+; CHECK-NEXT:    shll $8, %edx
+; CHECK-NEXT:    movzbl 3(%ecx), %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    addl %esi, %eax
 ; CHECK-NEXT:    popl %esi
 ; CHECK-NEXT:    .cfi_def_cfa_offset 4
 ; CHECK-NEXT:    retl
@@ -496,12 +496,12 @@ define i32 @load_i32_by_i8_bswap_store_in_between(ptr %arg, ptr %arg1) {
 ; CHECK64-NEXT:    movzbl 1(%rdi), %ecx
 ; CHECK64-NEXT:    movl $0, (%rsi)
 ; CHECK64-NEXT:    shll $16, %ecx
-; CHECK64-NEXT:    orl %eax, %ecx
+; CHECK64-NEXT:    addl %eax, %ecx
 ; CHECK64-NEXT:    movzbl 2(%rdi), %edx
 ; CHECK64-NEXT:    shll $8, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
 ; CHECK64-NEXT:    movzbl 3(%rdi), %eax
-; CHECK64-NEXT:    orl %edx, %eax
+; CHECK64-NEXT:    addl %edx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp2 = load i8, ptr %arg, align 1
   %tmp3 = zext i8 %tmp2 to i32
@@ -531,18 +531,23 @@ define i32 @load_i32_by_i8_bswap_store_in_between(ptr %arg, ptr %arg1) {
 define i32 @load_i32_by_i8_bswap_unrelated_load(ptr %arg, ptr %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_bswap_unrelated_load:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    movzbl (%ecx), %edx
 ; CHECK-NEXT:    shll $24, %edx
-; CHECK-NEXT:    movzbl 1(%eax), %eax
-; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %edx, %eax
+; CHECK-NEXT:    movzbl 1(%eax), %esi
+; CHECK-NEXT:    shll $16, %esi
+; CHECK-NEXT:    addl %edx, %esi
 ; CHECK-NEXT:    movzbl 2(%ecx), %edx
 ; CHECK-NEXT:    shll $8, %edx
-; CHECK-NEXT:    orl %eax, %edx
 ; CHECK-NEXT:    movzbl 3(%ecx), %eax
-; CHECK-NEXT:    orl %edx, %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    addl %esi, %eax
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    .cfi_def_cfa_offset 4
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_bswap_unrelated_load:
@@ -551,12 +556,12 @@ define i32 @load_i32_by_i8_bswap_unrelated_load(ptr %arg, ptr %arg1) {
 ; CHECK64-NEXT:    shll $24, %eax
 ; CHECK64-NEXT:    movzbl 1(%rsi), %ecx
 ; CHECK64-NEXT:    shll $16, %ecx
-; CHECK64-NEXT:    orl %eax, %ecx
+; CHECK64-NEXT:    addl %eax, %ecx
 ; CHECK64-NEXT:    movzbl 2(%rdi), %edx
 ; CHECK64-NEXT:    shll $8, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
 ; CHECK64-NEXT:    movzbl 3(%rdi), %eax
-; CHECK64-NEXT:    orl %edx, %eax
+; CHECK64-NEXT:    addl %edx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp3 = load i8, ptr %arg, align 1
   %tmp4 = zext i8 %tmp3 to i32
@@ -1125,7 +1130,7 @@ define i32 @zext_load_i32_by_i8_shl_8(ptr %arg) {
 ; CHECK-NEXT:    shll $8, %ecx
 ; CHECK-NEXT:    movzbl 1(%eax), %eax
 ; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_shl_8:
@@ -1134,7 +1139,7 @@ define i32 @zext_load_i32_by_i8_shl_8(ptr %arg) {
 ; CHECK64-NEXT:    shll $8, %ecx
 ; CHECK64-NEXT:    movzbl 1(%rdi), %eax
 ; CHECK64-NEXT:    shll $16, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp2 = load i8, ptr %arg, align 1
   %tmp3 = zext i8 %tmp2 to i32
@@ -1157,7 +1162,7 @@ define i32 @zext_load_i32_by_i8_shl_16(ptr %arg) {
 ; CHECK-NEXT:    shll $16, %ecx
 ; CHECK-NEXT:    movzbl 1(%eax), %eax
 ; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_shl_16:
@@ -1166,7 +1171,7 @@ define i32 @zext_load_i32_by_i8_shl_16(ptr %arg) {
 ; CHECK64-NEXT:    shll $16, %ecx
 ; CHECK64-NEXT:    movzbl 1(%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp2 = load i8, ptr %arg, align 1
   %tmp3 = zext i8 %tmp2 to i32
@@ -1229,7 +1234,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(ptr %arg) {
 ; CHECK-NEXT:    shll $8, %ecx
 ; CHECK-NEXT:    movzbl (%eax), %eax
 ; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_8:
@@ -1238,7 +1243,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(ptr %arg) {
 ; CHECK64-NEXT:    shll $8, %ecx
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $16, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
   %tmp2 = load i8, ptr %tmp1, align 1
@@ -1261,7 +1266,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(ptr %arg) {
 ; CHECK-NEXT:    shll $16, %ecx
 ; CHECK-NEXT:    movzbl (%eax), %eax
 ; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: zext_load_i32_by_i8_bswap_shl_16:
@@ -1270,7 +1275,7 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(ptr %arg) {
 ; CHECK64-NEXT:    shll $16, %ecx
 ; CHECK64-NEXT:    movzbl (%rdi), %eax
 ; CHECK64-NEXT:    shll $24, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    addl %ecx, %eax
 ; CHECK64-NEXT:    retq
   %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
   %tmp2 = load i8, ptr %tmp1, align 1
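
In the byte-assembly hunks above, each shifted byte occupies its own 8-bit lane, so every OR in the tree is disjoint and can be lowered to ADD; and since ADD is associative and commutative, the chain can then be re-ordered to shorten the dependency chain, which is why the new output sums the terms in a different order. A hedged C illustration (the function name and values are invented for the example):

    #include <assert.h>
    #include <stdint.h>

    /* Each term lands in a distinct 8-bit lane, so OR == ADD and the
       adds may be summed in any association. */
    static uint32_t assemble(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3) {
        uint32_t t0 = (uint32_t)b0 << 24;
        uint32_t t1 = (uint32_t)b1 << 16;
        uint32_t t2 = (uint32_t)b2 << 8;
        uint32_t t3 = (uint32_t)b3;
        uint32_t via_or = t0 | t1 | t2 | t3;
        uint32_t via_add = (t2 + t3) + (t0 + t1); /* reassociated */
        assert(via_or == via_add);
        return via_add;
    }

    int main(void) { return assemble(0xDE, 0xAD, 0xBE, 0xEF) == 0xDEADBEEF ? 0 : 1; }
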
diff --git a/llvm/test/CodeGen/X86/load-local-v3i1.ll b/llvm/test/CodeGen/X86/load-local-v3i1.ll
index 52e0eb826d1434..af3ce8c10acfe0 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i1.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i1.ll
@@ -18,10 +18,10 @@ define  <3 x i32> @masked_load_v3(ptr addrspace(1), <3 x i1>) {
 ; CHECK-NEXT:    andb $1, %sil
 ; CHECK-NEXT:    andb $1, %dl
 ; CHECK-NEXT:    addb %dl, %dl
-; CHECK-NEXT:    orb %sil, %dl
+; CHECK-NEXT:    addb %sil, %dl
 ; CHECK-NEXT:    andb $1, %cl
 ; CHECK-NEXT:    shlb $2, %cl
-; CHECK-NEXT:    orb %dl, %cl
+; CHECK-NEXT:    addb %dl, %cl
 ; CHECK-NEXT:    testb $1, %cl
 ; CHECK-NEXT:    # implicit-def: $xmm0
 ; CHECK-NEXT:    jne .LBB0_1
@@ -55,10 +55,10 @@ define void @masked_store4_v3(<3 x i32>, ptr addrspace(1), <3 x i1>) {
 ; CHECK-NEXT:    andb $1, %sil
 ; CHECK-NEXT:    andb $1, %dl
 ; CHECK-NEXT:    addb %dl, %dl
-; CHECK-NEXT:    orb %sil, %dl
+; CHECK-NEXT:    addb %sil, %dl
 ; CHECK-NEXT:    andb $1, %cl
 ; CHECK-NEXT:    shlb $2, %cl
-; CHECK-NEXT:    orb %dl, %cl
+; CHECK-NEXT:    addb %dl, %cl
 ; CHECK-NEXT:    testb $1, %cl
 ; CHECK-NEXT:    jne .LBB1_1
 ; CHECK-NEXT:  # %bb.2: # %else
diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
index 8fa7ce06645374..eb5d172a3b3527 100644
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -12,7 +12,7 @@ define void @_start() nounwind {
 ; FAST-SHLD-NEXT:    shrq $2, %rcx
 ; FAST-SHLD-NEXT:    shldq $2, %rdx, %rcx
 ; FAST-SHLD-NEXT:    andq $-4, %rax
-; FAST-SHLD-NEXT:    orq $1, %rax
+; FAST-SHLD-NEXT:    incq %rax
 ; FAST-SHLD-NEXT:    movq %rax, -40(%rsp)
 ; FAST-SHLD-NEXT:    movq %rcx, -32(%rsp)
 ; FAST-SHLD-NEXT:    orq $-2, -56(%rsp)
@@ -23,7 +23,7 @@ define void @_start() nounwind {
 ; SLOW-SHLD:       # %bb.0: # %Entry
 ; SLOW-SHLD-NEXT:    movq -40(%rsp), %rax
 ; SLOW-SHLD-NEXT:    andq $-4, %rax
-; SLOW-SHLD-NEXT:    orq $1, %rax
+; SLOW-SHLD-NEXT:    incq %rax
 ; SLOW-SHLD-NEXT:    movq %rax, -40(%rsp)
 ; SLOW-SHLD-NEXT:    orq $-2, -56(%rsp)
 ; SLOW-SHLD-NEXT:    movq $-1, -48(%rsp)
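
The `orq $1` -> `incq` change above combines two facts: `andq $-4` clears the two low bits, so OR-ing in 1 is an add of 1 on disjoint operands, and an add of 1 can be emitted as `inc`. A small sketch under the same assumptions:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t x = 0x1237;      /* arbitrary input */
        uint64_t a = x & ~3ULL;   /* andq $-4: low two bits now zero */
        assert((a | 1) == a + 1); /* or $1 == add $1, i.e. inc */
        return 0;
    }
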
diff --git a/llvm/test/CodeGen/X86/load-local-v4i5.ll b/llvm/test/CodeGen/X86/load-local-v4i5.ll
index 1d119b1dfefc28..2b4e9bd5410e03 100644
--- a/llvm/test/CodeGen/X86/load-local-v4i5.ll
+++ b/llvm/test/CodeGen/X86/load-local-v4i5.ll
@@ -17,13 +17,13 @@ define void @_start() {
 ; CHECK-NEXT:    andl $31, %eax
 ; CHECK-NEXT:    andl $31, %esi
 ; CHECK-NEXT:    shll $5, %esi
-; CHECK-NEXT:    orl %eax, %esi
+; CHECK-NEXT:    addl %eax, %esi
 ; CHECK-NEXT:    andl $31, %edx
 ; CHECK-NEXT:    shll $10, %edx
-; CHECK-NEXT:    orl %esi, %edx
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    shll $15, %eax
-; CHECK-NEXT:    orl %edx, %eax
+; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    addl %esi, %eax
 ; CHECK-NEXT:    movw %ax, -4(%rsp)
 ; CHECK-NEXT:    movb %dil, -5(%rsp)
 ; CHECK-NEXT:    cmpb $31, %dil
diff --git a/llvm/test/CodeGen/X86/logic-shift.ll b/llvm/test/CodeGen/X86/logic-shift.ll
index 96e63d1122ec92..6252bb97bceee0 100644
--- a/llvm/test/CodeGen/X86/logic-shift.ll
+++ b/llvm/test/CodeGen/X86/logic-shift.ll
@@ -905,7 +905,7 @@ define i8 @or_fshl_commute3(i8 %x, i8 %y) {
 ; CHECK-NEXT:    orl %edi, %esi
 ; CHECK-NEXT:    shlb $5, %sil
 ; CHECK-NEXT:    shrb $3, %al
-; CHECK-NEXT:    orb %sil, %al
+; CHECK-NEXT:    addb %sil, %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %or1 = or i8 %y, %x
@@ -983,7 +983,7 @@ define i8 @or_fshr_commute3(i8 %x, i8 %y) {
 ; CHECK-NEXT:    orl %edi, %esi
 ; CHECK-NEXT:    shrb $6, %sil
 ; CHECK-NEXT:    leal (,%rdi,4), %eax
-; CHECK-NEXT:    orb %sil, %al
+; CHECK-NEXT:    addb %sil, %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %or1 = or i8 %y, %x
diff --git a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
index 14dfc046c029a6..99a364b8e3e188 100644
--- a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -65,7 +65,7 @@ define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r)
 ; GENERIC-NEXT:    andl $1020, %r9d ## imm = 0x3FC
 ; GENERIC-NEXT:    movzbl 2(%rax,%r9), %r9d
 ; GENERIC-NEXT:    shll $16, %r9d
-; GENERIC-NEXT:    orl %r8d, %r9d
+; GENERIC-NEXT:    addl %r8d, %r9d
 ; GENERIC-NEXT:    xorl 16(%rcx,%rdx), %r9d
 ; GENERIC-NEXT:    shrl $8, %r14d
 ; GENERIC-NEXT:    movzbl 3(%rdi,%r14,4), %edi
@@ -73,7 +73,7 @@ define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r)
 ; GENERIC-NEXT:    movzbl %bpl, %r8d
 ; GENERIC-NEXT:    movzbl 2(%rax,%r8,4), %eax
 ; GENERIC-NEXT:    shll $16, %eax
-; GENERIC-NEXT:    orl %edi, %eax
+; GENERIC-NEXT:    addl %edi, %eax
 ; GENERIC-NEXT:    xorl 20(%rcx,%rdx), %eax
 ; GENERIC-NEXT:    movl %r9d, %ecx
 ; GENERIC-NEXT:    shrl $24, %ecx
@@ -151,12 +151,12 @@ define void @t(ptr nocapture %in, ptr nocapture %out, ptr nocapture %rk, i32 %r)
 ; ATOM-NEXT:    shll $24, %edi
 ; ATOM-NEXT:    movzbl 2(%rax,%r9), %r9d
 ; ATOM-NEXT:    shll $16, %r9d
-; ATOM-NEXT:    orl %r8d, %r9d
+; ATOM-NEXT:    addl %r8d, %r9d
 ; ATOM-NEXT:    movzbl %bl, %r8d
 ; ATOM-NEXT:    movzbl 2(%rax,%r8,4), %eax
 ; ATOM-NEXT:    xorl 16(%rcx,%rdx), %r9d
 ; ATOM-NEXT:    shll $16, %eax
-; ATOM-NEXT:    orl %edi, %eax
+; ATOM-NEXT:    addl %edi, %eax
 ; ATOM-NEXT:    movl %r9d, %edi
 ; ATOM-NEXT:    shrl $16, %r9d
 ; ATOM-NEXT:    xorl 20(%rcx,%rdx), %eax
diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index df4155845f37bf..8b35e389d59be5 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -2772,7 +2772,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; SSE2-NEXT:    paddd %xmm0, %xmm1
 ; SSE2-NEXT:    movd %xmm1, %eax
 ; SSE2-NEXT:    shlq $32, %rcx
-; SSE2-NEXT:    orq %rcx, %rax
+; SSE2-NEXT:    addq %rcx, %rax
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: sum_and_sum_of_squares:
@@ -2814,7 +2814,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
 ; AVX1-NEXT:    shlq $32, %rcx
-; AVX1-NEXT:    orq %rcx, %rax
+; AVX1-NEXT:    addq %rcx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -2849,7 +2849,7 @@ define i64 @sum_and_sum_of_squares(ptr %a, i32 %n) {
 ; AVX256-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX256-NEXT:    vmovd %xmm0, %eax
 ; AVX256-NEXT:    shlq $32, %rcx
-; AVX256-NEXT:    orq %rcx, %rax
+; AVX256-NEXT:    addq %rcx, %rax
 ; AVX256-NEXT:    vzeroupper
 ; AVX256-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/masked_compressstore.ll b/llvm/test/CodeGen/X86/masked_compressstore.ll
index 3187bf6448690e..9bf200ad7e0493 100644
--- a/llvm/test/CodeGen/X86/masked_compressstore.ll
+++ b/llvm/test/CodeGen/X86/masked_compressstore.ll
@@ -1261,7 +1261,7 @@ define void @compressstore_v32f32_v32i32(ptr %base, <32 x float> %V, <32 x i32>
 ; SSE2-NEXT:    packsswb %xmm10, %xmm8
 ; SSE2-NEXT:    pmovmskb %xmm8, %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    jne LBB6_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
@@ -1585,7 +1585,7 @@ define void @compressstore_v32f32_v32i32(ptr %base, <32 x float> %V, <32 x i32>
 ; SSE42-NEXT:    packsswb %xmm10, %xmm8
 ; SSE42-NEXT:    pmovmskb %xmm8, %eax
 ; SSE42-NEXT:    shll $16, %eax
-; SSE42-NEXT:    orl %ecx, %eax
+; SSE42-NEXT:    addl %ecx, %eax
 ; SSE42-NEXT:    testb $1, %al
 ; SSE42-NEXT:    jne LBB6_1
 ; SSE42-NEXT:  ## %bb.2: ## %else
@@ -1866,7 +1866,7 @@ define void @compressstore_v32f32_v32i32(ptr %base, <32 x float> %V, <32 x i32>
 ; AVX1-NEXT:    vpacksswb %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT:    vpmovmskb %xmm4, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne LBB6_1
 ; AVX1-NEXT:  ## %bb.2: ## %else
diff --git a/llvm/test/CodeGen/X86/masked_expandload.ll b/llvm/test/CodeGen/X86/masked_expandload.ll
index 4c5b67962a58bd..27d3564bfac080 100644
--- a/llvm/test/CodeGen/X86/masked_expandload.ll
+++ b/llvm/test/CodeGen/X86/masked_expandload.ll
@@ -1357,7 +1357,7 @@ define <32 x float> @expandload_v32f32_v32i32(ptr %base, <32 x float> %src0, <32
 ; SSE2-NEXT:    packsswb %xmm10, %xmm8
 ; SSE2-NEXT:    pmovmskb %xmm8, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %edx, %ecx
+; SSE2-NEXT:    addl %edx, %ecx
 ; SSE2-NEXT:    testb $1, %cl
 ; SSE2-NEXT:    jne LBB8_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
@@ -1714,7 +1714,7 @@ define <32 x float> @expandload_v32f32_v32i32(ptr %base, <32 x float> %src0, <32
 ; SSE42-NEXT:    packsswb %xmm10, %xmm8
 ; SSE42-NEXT:    pmovmskb %xmm8, %ecx
 ; SSE42-NEXT:    shll $16, %ecx
-; SSE42-NEXT:    orl %edx, %ecx
+; SSE42-NEXT:    addl %edx, %ecx
 ; SSE42-NEXT:    testb $1, %cl
 ; SSE42-NEXT:    jne LBB8_1
 ; SSE42-NEXT:  ## %bb.2: ## %else
@@ -2011,7 +2011,7 @@ define <32 x float> @expandload_v32f32_v32i32(ptr %base, <32 x float> %src0, <32
 ; AVX1-NEXT:    vpacksswb %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT:    vpmovmskb %xmm4, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne LBB8_1
 ; AVX1-NEXT:  ## %bb.2: ## %else
diff --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
index 89459a2d10177d..c39e98d502f1a7 100644
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -4516,7 +4516,7 @@ define <32 x i8> @load_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %dst
 ; SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    jne LBB24_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
@@ -4935,7 +4935,7 @@ define <32 x i8> @load_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %dst
 ; SSE42-NEXT:    pmovmskb %xmm0, %ecx
 ; SSE42-NEXT:    pmovmskb %xmm1, %eax
 ; SSE42-NEXT:    shll $16, %eax
-; SSE42-NEXT:    orl %ecx, %eax
+; SSE42-NEXT:    addl %ecx, %eax
 ; SSE42-NEXT:    testb $1, %al
 ; SSE42-NEXT:    jne LBB24_1
 ; SSE42-NEXT:  ## %bb.2: ## %else
@@ -5169,7 +5169,7 @@ define <32 x i8> @load_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %dst
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne LBB24_1
 ; AVX1-NEXT:  ## %bb.2: ## %else
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 898b34e969b1d2..36260721f268a8 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -3282,7 +3282,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
 ; SSE2-NEXT:    pcmpeqb %xmm4, %xmm1
 ; SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    movd %xmm2, %ecx
 ; SSE2-NEXT:    jne LBB16_1
@@ -3490,7 +3490,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
 ; SSE4-NEXT:    pcmpeqb %xmm4, %xmm1
 ; SSE4-NEXT:    pmovmskb %xmm1, %eax
 ; SSE4-NEXT:    shll $16, %eax
-; SSE4-NEXT:    orl %ecx, %eax
+; SSE4-NEXT:    addl %ecx, %eax
 ; SSE4-NEXT:    testb $1, %al
 ; SSE4-NEXT:    jne LBB16_1
 ; SSE4-NEXT:  ## %bb.2: ## %else
@@ -3725,7 +3725,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
 ; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne LBB16_1
 ; AVX1-NEXT:  ## %bb.2: ## %else
@@ -5273,10 +5273,10 @@ define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
 ; SSE2-NEXT:    andb $1, %sil
 ; SSE2-NEXT:    andb $1, %dl
 ; SSE2-NEXT:    addb %dl, %dl
-; SSE2-NEXT:    orb %sil, %dl
+; SSE2-NEXT:    addb %sil, %dl
 ; SSE2-NEXT:    andb $1, %cl
 ; SSE2-NEXT:    shlb $2, %cl
-; SSE2-NEXT:    orb %dl, %cl
+; SSE2-NEXT:    addb %dl, %cl
 ; SSE2-NEXT:    testb $1, %cl
 ; SSE2-NEXT:    jne LBB28_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
@@ -5306,10 +5306,10 @@ define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
 ; SSE4-NEXT:    andb $1, %sil
 ; SSE4-NEXT:    andb $1, %dl
 ; SSE4-NEXT:    addb %dl, %dl
-; SSE4-NEXT:    orb %sil, %dl
+; SSE4-NEXT:    addb %sil, %dl
 ; SSE4-NEXT:    andb $1, %cl
 ; SSE4-NEXT:    shlb $2, %cl
-; SSE4-NEXT:    orb %dl, %cl
+; SSE4-NEXT:    addb %dl, %cl
 ; SSE4-NEXT:    testb $1, %cl
 ; SSE4-NEXT:    jne LBB28_1
 ; SSE4-NEXT:  ## %bb.2: ## %else
@@ -5665,7 +5665,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
 ; SSE2-NEXT:    pmovmskb %xmm8, %ecx
 ; SSE2-NEXT:    andl $85, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %edi, %ecx
+; SSE2-NEXT:    addl %edi, %ecx
 ; SSE2-NEXT:    testb $1, %cl
 ; SSE2-NEXT:    jne LBB31_1
 ; SSE2-NEXT:  ## %bb.2: ## %else
@@ -5926,7 +5926,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
 ; SSE4-NEXT:    pmovmskb %xmm0, %edi
 ; SSE4-NEXT:    andl $85, %edi
 ; SSE4-NEXT:    shll $16, %edi
-; SSE4-NEXT:    orl %eax, %edi
+; SSE4-NEXT:    addl %eax, %edi
 ; SSE4-NEXT:    movl 48(%rsi), %r13d
 ; SSE4-NEXT:    testb $1, %dil
 ; SSE4-NEXT:    movl 44(%rsi), %eax
diff --git a/llvm/test/CodeGen/X86/masked_store_trunc.ll b/llvm/test/CodeGen/X86/masked_store_trunc.ll
index f4a0207dafde7c..874165531c966f 100644
--- a/llvm/test/CodeGen/X86/masked_store_trunc.ll
+++ b/llvm/test/CodeGen/X86/masked_store_trunc.ll
@@ -4384,7 +4384,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE2-NEXT:    pmovmskb %xmm5, %eax
 ; SSE2-NEXT:    notl %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    movd %xmm0, %ecx
 ; SSE2-NEXT:    jne .LBB15_1
@@ -4601,7 +4601,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE4-NEXT:    pmovmskb %xmm5, %eax
 ; SSE4-NEXT:    notl %eax
 ; SSE4-NEXT:    shll $16, %eax
-; SSE4-NEXT:    orl %ecx, %eax
+; SSE4-NEXT:    addl %ecx, %eax
 ; SSE4-NEXT:    testb $1, %al
 ; SSE4-NEXT:    jne .LBB15_1
 ; SSE4-NEXT:  # %bb.2: # %else
@@ -4846,7 +4846,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne .LBB15_1
 ; AVX1-NEXT:  # %bb.2: # %else
diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
index 487f7298f442c2..459d1e6d8f0d52 100644
--- a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
+++ b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
@@ -5288,7 +5288,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE2-NEXT:    pmovmskb %xmm5, %eax
 ; SSE2-NEXT:    notl %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    movd %xmm0, %ecx
 ; SSE2-NEXT:    jne .LBB15_1
@@ -5500,7 +5500,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE4-NEXT:    pmovmskb %xmm5, %eax
 ; SSE4-NEXT:    notl %eax
 ; SSE4-NEXT:    shll $16, %eax
-; SSE4-NEXT:    orl %ecx, %eax
+; SSE4-NEXT:    addl %ecx, %eax
 ; SSE4-NEXT:    testb $1, %al
 ; SSE4-NEXT:    jne .LBB15_1
 ; SSE4-NEXT:  # %bb.2: # %else
@@ -5742,7 +5742,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne .LBB15_1
 ; AVX1-NEXT:  # %bb.2: # %else
diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
index 498f250f11c690..9d2f670077494b 100644
--- a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
+++ b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
@@ -5105,7 +5105,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE2-NEXT:    pmovmskb %xmm5, %eax
 ; SSE2-NEXT:    notl %eax
 ; SSE2-NEXT:    shll $16, %eax
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    testb $1, %al
 ; SSE2-NEXT:    movd %xmm0, %ecx
 ; SSE2-NEXT:    jne .LBB15_1
@@ -5326,7 +5326,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; SSE4-NEXT:    pmovmskb %xmm5, %eax
 ; SSE4-NEXT:    notl %eax
 ; SSE4-NEXT:    shll $16, %eax
-; SSE4-NEXT:    orl %ecx, %eax
+; SSE4-NEXT:    addl %ecx, %eax
 ; SSE4-NEXT:    testb $1, %al
 ; SSE4-NEXT:    jne .LBB15_1
 ; SSE4-NEXT:  # %bb.2: # %else
@@ -5573,7 +5573,7 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) {
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    notl %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    jne .LBB15_1
 ; AVX1-NEXT:  # %bb.2: # %else
diff --git a/llvm/test/CodeGen/X86/memset-inline.ll b/llvm/test/CodeGen/X86/memset-inline.ll
index 905a0ffda061f9..cfb6e1c17e9e54 100644
--- a/llvm/test/CodeGen/X86/memset-inline.ll
+++ b/llvm/test/CodeGen/X86/memset-inline.ll
@@ -23,7 +23,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
 ; GPR:       # %bb.0:
 ; GPR-NEXT:    movzbl %sil, %eax
 ; GPR-NEXT:    shll $8, %esi
-; GPR-NEXT:    orl %esi, %eax
+; GPR-NEXT:    addl %esi, %eax
 ; GPR-NEXT:    movw %ax, (%rdi)
 ; GPR-NEXT:    retq
   tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 2, i1 0)
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index a7564c9622c5ca..08176fd7aeb9c7 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -3615,7 +3615,7 @@ define i32 @movmskb256(<32 x i8> %x) {
 ; SSE-NEXT:    pmovmskb %xmm0, %ecx
 ; SSE-NEXT:    pmovmskb %xmm1, %eax
 ; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %ecx, %eax
+; SSE-NEXT:    addl %ecx, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: movmskb256:
@@ -3624,7 +3624,7 @@ define i32 @movmskb256(<32 x i8> %x) {
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %ecx, %eax
+; AVX1-NEXT:    addl %ecx, %eax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/mul128.ll b/llvm/test/CodeGen/X86/mul128.ll
index fc1cc1f65627a8..ef1db4945c9c61 100644
--- a/llvm/test/CodeGen/X86/mul128.ll
+++ b/llvm/test/CodeGen/X86/mul128.ll
@@ -111,7 +111,7 @@ define void @PR13897() nounwind {
 ; X64-NEXT:    movq bbb(%rip), %rax
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    shlq $32, %rax
-; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    addq %rcx, %rax
 ; X64-NEXT:    movq %rax, aaa+8(%rip)
 ; X64-NEXT:    movq %rax, aaa(%rip)
 ; X64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/no-wide-load.ll b/llvm/test/CodeGen/X86/no-wide-load.ll
index fb6d2c735d36c4..6b8e6bba75db06 100644
--- a/llvm/test/CodeGen/X86/no-wide-load.ll
+++ b/llvm/test/CodeGen/X86/no-wide-load.ll
@@ -8,7 +8,7 @@ define void @foo(ptr %p, i16 signext %s) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movzwl 4(%rdi), %eax
 ; CHECK-NEXT:    andl $-1121, %eax # imm = 0xFB9F
-; CHECK-NEXT:    orl $1024, %eax # imm = 0x400
+; CHECK-NEXT:    addl $1024, %eax # imm = 0x400
 ; CHECK-NEXT:    movw %ax, 4(%rdi)
 ; CHECK-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/or-lea.ll b/llvm/test/CodeGen/X86/or-lea.ll
index 616ab994378927..d7927036394c94 100644
--- a/llvm/test/CodeGen/X86/or-lea.ll
+++ b/llvm/test/CodeGen/X86/or-lea.ll
@@ -127,7 +127,7 @@ define i32 @or_shift4_and1(i32 %x, i32 %y) {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    andl $1, %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: or_shift4_and1:
@@ -831,7 +831,7 @@ define i32 @or_shift1_disjoint(i32 %x, i32 %y) {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    addl %eax, %eax
-; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: or_shift1_disjoint:
diff --git a/llvm/test/CodeGen/X86/pr20011.ll b/llvm/test/CodeGen/X86/pr20011.ll
index 4810226b4a7564..9d66d4999ecb3a 100644
--- a/llvm/test/CodeGen/X86/pr20011.ll
+++ b/llvm/test/CodeGen/X86/pr20011.ll
@@ -12,7 +12,7 @@ define void @crash(i64 %x0, i64 %y0, ptr nocapture %dest) nounwind {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    shlb $2, %dl
 ; X86-NEXT:    andb $3, %cl
-; X86-NEXT:    orb %dl, %cl
+; X86-NEXT:    addb %dl, %cl
 ; X86-NEXT:    andb $15, %cl
 ; X86-NEXT:    movb %cl, (%eax)
 ; X86-NEXT:    retl
@@ -21,7 +21,7 @@ define void @crash(i64 %x0, i64 %y0, ptr nocapture %dest) nounwind {
 ; X64:       # %bb.0:
 ; X64-NEXT:    shlb $2, %sil
 ; X64-NEXT:    andb $3, %dil
-; X64-NEXT:    orb %sil, %dil
+; X64-NEXT:    addb %sil, %dil
 ; X64-NEXT:    andb $15, %dil
 ; X64-NEXT:    movb %dil, (%rdx)
 ; X64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/pr23664.ll b/llvm/test/CodeGen/X86/pr23664.ll
index 453e5db2bed61c..8179602b8c2a83 100644
--- a/llvm/test/CodeGen/X86/pr23664.ll
+++ b/llvm/test/CodeGen/X86/pr23664.ll
@@ -6,7 +6,7 @@ define i2 @f(i32 %arg) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT:    leal (%rdi,%rdi), %eax
-; CHECK-NEXT:    orb $1, %al
+; CHECK-NEXT:    incb %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %trunc = trunc i32 %arg to i1
diff --git a/llvm/test/CodeGen/X86/pr27202.ll b/llvm/test/CodeGen/X86/pr27202.ll
index 3bd3be62fb4c85..25ce65b3d9b1a1 100644
--- a/llvm/test/CodeGen/X86/pr27202.ll
+++ b/llvm/test/CodeGen/X86/pr27202.ll
@@ -51,7 +51,7 @@ define i64 @PR46237(i64 %x, i64 %y, i64 %z) optsize {
 ; CHECK-NEXT:    andl $7, %esi
 ; CHECK-NEXT:    andl $7, %edx
 ; CHECK-NEXT:    leaq (%rdx,%rsi,8), %rax
-; CHECK-NEXT:    orq %rcx, %rax
+; CHECK-NEXT:    addq %rcx, %rax
 ; CHECK-NEXT:    retq
   %and = shl i64 %z, 6
   %shl = and i64 %and, 192
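
The PR46237 hunk above also shows why `add` is often the more useful canonical form: the three fields occupy bits 6..7, 3..5, and 0..2, so the ORs are disjoint, and once they are adds the backend can fold the scaled terms into LEA addressing (`leaq (%rdx,%rsi,8), %rax`) and finish with a plain `addq`. A hedged C restatement of the disjoint-fields condition (the values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t x = 5, y = 3, z = 2;
        uint64_t f0 = x & 7;          /* bits 0..2 */
        uint64_t f1 = (y & 7) << 3;   /* bits 3..5 */
        uint64_t f2 = (z << 6) & 192; /* bits 6..7 */
        assert((f0 | f1 | f2) == f0 + f1 + f2); /* disjoint: OR tree == ADD tree */
        return 0;
    }
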
diff --git a/llvm/test/CodeGen/X86/pr28173.ll b/llvm/test/CodeGen/X86/pr28173.ll
index cf4969fa7dae07..ed56271c650b3b 100644
--- a/llvm/test/CodeGen/X86/pr28173.ll
+++ b/llvm/test/CodeGen/X86/pr28173.ll
@@ -44,7 +44,7 @@ define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
 ; CHECK-LABEL: foo16_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movzbl %dil, %eax
-; CHECK-NEXT:    orl $2, %eax
+; CHECK-NEXT:    addl $2, %eax
 ; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    retq
   br label %bb
@@ -79,7 +79,7 @@ define i8 @foo8(i1 zeroext %i) #0 {
 ; CHECK-LABEL: foo8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    orb $-2, %al
+; CHECK-NEXT:    addb $-2, %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   br label %bb
diff --git a/llvm/test/CodeGen/X86/pr35636.ll b/llvm/test/CodeGen/X86/pr35636.ll
index 0b7d64f38c7802..7f668c967fc15b 100644
--- a/llvm/test/CodeGen/X86/pr35636.ll
+++ b/llvm/test/CodeGen/X86/pr35636.ll
@@ -16,7 +16,7 @@ define void @_Z15uint64_to_asciimPc(i64 %arg) {
 ; HSW-NEXT:    andl $134217727, %eax # imm = 0x7FFFFFF
 ; HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; HSW-NEXT:    shrl $26, %eax
-; HSW-NEXT:    orb $48, %al
+; HSW-NEXT:    addb $48, %al
 ; HSW-NEXT:    movb %al, (%rax)
 ; HSW-NEXT:    retq
 ;
@@ -32,7 +32,7 @@ define void @_Z15uint64_to_asciimPc(i64 %arg) {
 ; ZN-NEXT:    andl $134217727, %eax # imm = 0x7FFFFFF
 ; ZN-NEXT:    leal (%rax,%rax,4), %eax
 ; ZN-NEXT:    shrl $26, %eax
-; ZN-NEXT:    orb $48, %al
+; ZN-NEXT:    addb $48, %al
 ; ZN-NEXT:    movb %al, (%rax)
 ; ZN-NEXT:    retq
 bb:
diff --git a/llvm/test/CodeGen/X86/pr35763.ll b/llvm/test/CodeGen/X86/pr35763.ll
index 9d2ee84cf675bd..e181f9553f880f 100644
--- a/llvm/test/CodeGen/X86/pr35763.ll
+++ b/llvm/test/CodeGen/X86/pr35763.ll
@@ -17,7 +17,7 @@ define dso_local void @PR35763() {
 ; CHECK-NEXT:    movl z+6(%rip), %eax
 ; CHECK-NEXT:    movzbl z+10(%rip), %ecx
 ; CHECK-NEXT:    shlq $32, %rcx
-; CHECK-NEXT:    orq %rax, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
 ; CHECK-NEXT:    movabsq $1090921758719, %rax # imm = 0xFE0000FFFF
 ; CHECK-NEXT:    andq %rcx, %rax
 ; CHECK-NEXT:    movl %eax, z+6(%rip)
diff --git a/llvm/test/CodeGen/X86/pr43820.ll b/llvm/test/CodeGen/X86/pr43820.ll
index 2fb4410567be69..fd006460f7fb8c 100644
--- a/llvm/test/CodeGen/X86/pr43820.ll
+++ b/llvm/test/CodeGen/X86/pr43820.ll
@@ -24,7 +24,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %r10
 ; CHECK-NEXT:    andq %rsi, %r12
 ; CHECK-NEXT:    shlq $4, %r12
-; CHECK-NEXT:    orq %r10, %r12
+; CHECK-NEXT:    addq %r10, %r12
 ; CHECK-NEXT:    movabsq $3689348814741910323, %r10 # imm = 0x3333333333333333
 ; CHECK-NEXT:    movq %r12, %r13
 ; CHECK-NEXT:    andq %r10, %r13
@@ -44,7 +44,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %r12
 ; CHECK-NEXT:    andq %rsi, %r14
 ; CHECK-NEXT:    shlq $4, %r14
-; CHECK-NEXT:    orq %r12, %r14
+; CHECK-NEXT:    addq %r12, %r14
 ; CHECK-NEXT:    movq %r14, %r12
 ; CHECK-NEXT:    andq %r10, %r12
 ; CHECK-NEXT:    shrq $2, %r14
@@ -63,7 +63,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %r12
 ; CHECK-NEXT:    andq %rsi, %r15
 ; CHECK-NEXT:    shlq $4, %r15
-; CHECK-NEXT:    orq %r12, %r15
+; CHECK-NEXT:    addq %r12, %r15
 ; CHECK-NEXT:    movq %r15, %r12
 ; CHECK-NEXT:    andq %r10, %r12
 ; CHECK-NEXT:    shrq $2, %r15
@@ -81,7 +81,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %r15
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    shlq $4, %rbx
-; CHECK-NEXT:    orq %r15, %rbx
+; CHECK-NEXT:    addq %r15, %rbx
 ; CHECK-NEXT:    movq %rbx, %r15
 ; CHECK-NEXT:    andq %r10, %r15
 ; CHECK-NEXT:    shrq $2, %rbx
@@ -99,7 +99,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -118,7 +118,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -137,7 +137,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -156,7 +156,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -175,7 +175,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -194,7 +194,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rbx
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rbx, %rdi
+; CHECK-NEXT:    addq %rbx, %rdi
 ; CHECK-NEXT:    movq %rdi, %rbx
 ; CHECK-NEXT:    andq %r10, %rbx
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -213,7 +213,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %rdi
 ; CHECK-NEXT:    shlq $4, %rdi
-; CHECK-NEXT:    orq %rax, %rdi
+; CHECK-NEXT:    addq %rax, %rdi
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %rdi
@@ -230,7 +230,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %r9
 ; CHECK-NEXT:    shlq $4, %r9
-; CHECK-NEXT:    orq %rax, %r9
+; CHECK-NEXT:    addq %rax, %r9
 ; CHECK-NEXT:    movq %r9, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %r9
@@ -248,7 +248,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %r8
 ; CHECK-NEXT:    shlq $4, %r8
-; CHECK-NEXT:    orq %rax, %r8
+; CHECK-NEXT:    addq %rax, %r8
 ; CHECK-NEXT:    movq %r8, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %r8
@@ -266,7 +266,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %rcx
 ; CHECK-NEXT:    shlq $4, %rcx
-; CHECK-NEXT:    orq %rax, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
 ; CHECK-NEXT:    movq %rcx, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %rcx
@@ -283,7 +283,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %rdx
 ; CHECK-NEXT:    shlq $4, %rdx
-; CHECK-NEXT:    orq %rax, %rdx
+; CHECK-NEXT:    addq %rax, %rdx
 ; CHECK-NEXT:    movq %rdx, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %rdx
@@ -301,7 +301,7 @@ define i1000 @square(i1000 %A) nounwind {
 ; CHECK-NEXT:    andq %rsi, %rax
 ; CHECK-NEXT:    andq %rsi, %rcx
 ; CHECK-NEXT:    shlq $4, %rcx
-; CHECK-NEXT:    orq %rax, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
 ; CHECK-NEXT:    movq %rcx, %rax
 ; CHECK-NEXT:    andq %r10, %rax
 ; CHECK-NEXT:    shrq $2, %rcx
diff --git a/llvm/test/CodeGen/X86/pr47299.ll b/llvm/test/CodeGen/X86/pr47299.ll
index 7cb1112402ebe5..550b270cb5ada8 100644
--- a/llvm/test/CodeGen/X86/pr47299.ll
+++ b/llvm/test/CodeGen/X86/pr47299.ll
@@ -30,19 +30,19 @@ define <7 x i1> @create_mask7(i64 %0) {
 ; CHECK-NEXT:    and r10b, 1
 ; CHECK-NEXT:    and r9b, 1
 ; CHECK-NEXT:    add r9b, r9b
-; CHECK-NEXT:    or r9b, r10b
+; CHECK-NEXT:    add r9b, r10b
 ; CHECK-NEXT:    and r8b, 1
 ; CHECK-NEXT:    shl r8b, 2
-; CHECK-NEXT:    or r8b, r9b
 ; CHECK-NEXT:    and dil, 1
 ; CHECK-NEXT:    shl dil, 3
-; CHECK-NEXT:    or dil, r8b
+; CHECK-NEXT:    add dil, r8b
+; CHECK-NEXT:    add dil, r9b
 ; CHECK-NEXT:    and sil, 1
 ; CHECK-NEXT:    shl sil, 4
-; CHECK-NEXT:    or sil, dil
 ; CHECK-NEXT:    and dl, 1
 ; CHECK-NEXT:    shl dl, 5
-; CHECK-NEXT:    or dl, sil
+; CHECK-NEXT:    add dl, sil
+; CHECK-NEXT:    add dl, dil
 ; CHECK-NEXT:    shl cl, 6
 ; CHECK-NEXT:    or cl, dl
 ; CHECK-NEXT:    and cl, 127
diff --git a/llvm/test/CodeGen/X86/pr62653.ll b/llvm/test/CodeGen/X86/pr62653.ll
index b6a1bf47983dc7..88b14189f1cb7c 100644
--- a/llvm/test/CodeGen/X86/pr62653.ll
+++ b/llvm/test/CodeGen/X86/pr62653.ll
@@ -4,81 +4,82 @@
 define <64 x i4> @pr62653(<64 x i4> %a0) nounwind {
 ; CHECK-LABEL: pr62653:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
 ; CHECK-NEXT:    andl $15, %edi
 ; CHECK-NEXT:    shll $4, %edi
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; CHECK-NEXT:    andl $15, %r10d
-; CHECK-NEXT:    orq %rdi, %r10
+; CHECK-NEXT:    addq %rdi, %r10
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
 ; CHECK-NEXT:    andl $15, %edi
 ; CHECK-NEXT:    shll $8, %edi
-; CHECK-NEXT:    orq %r10, %rdi
+; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT:    andl $15, %r11d
+; CHECK-NEXT:    shll $12, %r11d
+; CHECK-NEXT:    addq %rdi, %r11
+; CHECK-NEXT:    addq %r10, %r11
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; CHECK-NEXT:    andl $15, %r10d
-; CHECK-NEXT:    shll $12, %r10d
-; CHECK-NEXT:    orq %rdi, %r10
+; CHECK-NEXT:    shll $16, %r10d
+; CHECK-NEXT:    addq %r11, %r10
+; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
+; CHECK-NEXT:    andl $15, %edi
+; CHECK-NEXT:    shll $20, %edi
+; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
+; CHECK-NEXT:    andl $15, %r11d
+; CHECK-NEXT:    shll $24, %r11d
+; CHECK-NEXT:    orq %rdi, %r11
+; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %ebx
+; CHECK-NEXT:    shll $28, %ebx
+; CHECK-NEXT:    orq %r11, %rbx
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
 ; CHECK-NEXT:    andl $15, %edi
-; CHECK-NEXT:    shll $16, %edi
+; CHECK-NEXT:    shlq $32, %rdi
+; CHECK-NEXT:    orq %rbx, %rdi
 ; CHECK-NEXT:    orq %r10, %rdi
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; CHECK-NEXT:    andl $15, %r10d
-; CHECK-NEXT:    shll $20, %r10d
+; CHECK-NEXT:    shlq $36, %r10
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
 ; CHECK-NEXT:    andl $15, %r11d
-; CHECK-NEXT:    shll $24, %r11d
+; CHECK-NEXT:    shlq $40, %r11
 ; CHECK-NEXT:    orq %r10, %r11
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
-; CHECK-NEXT:    shll $28, %r10d
+; CHECK-NEXT:    andl $15, %r10d
+; CHECK-NEXT:    shlq $44, %r10
 ; CHECK-NEXT:    orq %r11, %r10
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
 ; CHECK-NEXT:    andl $15, %r11d
-; CHECK-NEXT:    shlq $32, %r11
+; CHECK-NEXT:    shlq $48, %r11
 ; CHECK-NEXT:    orq %r10, %r11
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
 ; CHECK-NEXT:    andl $15, %r10d
-; CHECK-NEXT:    shlq $36, %r10
+; CHECK-NEXT:    shlq $52, %r10
 ; CHECK-NEXT:    orq %r11, %r10
 ; CHECK-NEXT:    orq %rdi, %r10
-; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
-; CHECK-NEXT:    andl $15, %edi
-; CHECK-NEXT:    shlq $40, %rdi
-; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
-; CHECK-NEXT:    andl $15, %r11d
-; CHECK-NEXT:    shlq $44, %r11
-; CHECK-NEXT:    orq %rdi, %r11
-; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
-; CHECK-NEXT:    andl $15, %edi
-; CHECK-NEXT:    shlq $48, %rdi
-; CHECK-NEXT:    orq %r11, %rdi
-; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
-; CHECK-NEXT:    andl $15, %r11d
-; CHECK-NEXT:    shlq $52, %r11
-; CHECK-NEXT:    orq %rdi, %r11
-; CHECK-NEXT:    orq %r10, %r11
-; CHECK-NEXT:    movq %r11, 8(%rax)
+; CHECK-NEXT:    movq %r10, 8(%rax)
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edi
 ; CHECK-NEXT:    andl $15, %edi
 ; CHECK-NEXT:    shlq $32, %rdi
 ; CHECK-NEXT:    andl $15, %esi
 ; CHECK-NEXT:    andl $15, %edx
 ; CHECK-NEXT:    shll $4, %edx
-; CHECK-NEXT:    orl %esi, %edx
+; CHECK-NEXT:    addl %esi, %edx
 ; CHECK-NEXT:    andl $15, %ecx
 ; CHECK-NEXT:    shll $8, %ecx
-; CHECK-NEXT:    orl %edx, %ecx
 ; CHECK-NEXT:    andl $15, %r8d
 ; CHECK-NEXT:    shll $12, %r8d
-; CHECK-NEXT:    orl %ecx, %r8d
+; CHECK-NEXT:    addl %ecx, %r8d
+; CHECK-NEXT:    addl %edx, %r8d
 ; CHECK-NEXT:    andl $15, %r9d
 ; CHECK-NEXT:    shll $16, %r9d
-; CHECK-NEXT:    orl %r8d, %r9d
+; CHECK-NEXT:    addl %r8d, %r9d
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT:    andl $15, %ecx
 ; CHECK-NEXT:    shll $20, %ecx
-; CHECK-NEXT:    orl %r9d, %ecx
+; CHECK-NEXT:    addl %r9d, %ecx
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edx
 ; CHECK-NEXT:    andl $15, %edx
 ; CHECK-NEXT:    shll $24, %edx
@@ -86,19 +87,19 @@ define <64 x i4> @pr62653(<64 x i4> %a0) nounwind {
 ; CHECK-NEXT:    shll $28, %esi
 ; CHECK-NEXT:    orl %edx, %esi
 ; CHECK-NEXT:    orl %ecx, %esi
-; CHECK-NEXT:    orq %rdi, %rsi
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT:    andl $15, %ecx
 ; CHECK-NEXT:    shlq $36, %rcx
-; CHECK-NEXT:    orq %rsi, %rcx
+; CHECK-NEXT:    addq %rdi, %rcx
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edx
 ; CHECK-NEXT:    andl $15, %edx
 ; CHECK-NEXT:    shlq $40, %rdx
-; CHECK-NEXT:    orq %rcx, %rdx
+; CHECK-NEXT:    addq %rcx, %rdx
+; CHECK-NEXT:    addq %rsi, %rdx
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %ecx
 ; CHECK-NEXT:    andl $15, %ecx
 ; CHECK-NEXT:    shlq $44, %rcx
-; CHECK-NEXT:    orq %rdx, %rcx
+; CHECK-NEXT:    addq %rdx, %rcx
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%rsp), %edx
 ; CHECK-NEXT:    andl $15, %edx
 ; CHECK-NEXT:    shlq $48, %rdx
@@ -115,6 +116,7 @@ define <64 x i4> @pr62653(<64 x i4> %a0) nounwind {
 ; CHECK-NEXT:    orq %rdx, %rsi
 ; CHECK-NEXT:    orq %rcx, %rsi
 ; CHECK-NEXT:    movq %rsi, (%rax)
+; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
   %res = shufflevector <64 x i4> %a0, <64 x i4> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 64, i32 65, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
   ret <64 x i4> %res
diff --git a/llvm/test/CodeGen/X86/pr69965.ll b/llvm/test/CodeGen/X86/pr69965.ll
index 33bea976c7896d..e8b6edcc9993ba 100644
--- a/llvm/test/CodeGen/X86/pr69965.ll
+++ b/llvm/test/CodeGen/X86/pr69965.ll
@@ -15,7 +15,7 @@ define i64 @PR69965(ptr %input_ptrs, ptr %output_ptrs) {
 ; X86-NEXT:    movl (%ecx), %ecx
 ; X86-NEXT:    addb %al, %al
 ; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    orl $32768, %eax # imm = 0x8000
 ; X86-NEXT:    movw %ax, (%ecx)
 ; X86-NEXT:    xorl %eax, %eax
@@ -32,7 +32,7 @@ define i64 @PR69965(ptr %input_ptrs, ptr %output_ptrs) {
 ; X64-NEXT:    shll $8, %eax
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    movzbl %cl, %ecx
-; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    orl $32768, %ecx # imm = 0x8000
 ; X64-NEXT:    movw %cx, (%rdx)
 ; X64-NEXT:    xorl %eax, %eax
diff --git a/llvm/test/CodeGen/X86/pr77459.ll b/llvm/test/CodeGen/X86/pr77459.ll
index 96f6a188193834..74ed24295ac702 100644
--- a/llvm/test/CodeGen/X86/pr77459.ll
+++ b/llvm/test/CodeGen/X86/pr77459.ll
@@ -14,13 +14,13 @@ define i4 @reverse_cmp_v4i1(<4 x i32> %a0, <4 x i32> %a1) {
 ; SSE2-NEXT:    andb $4, %cl
 ; SSE2-NEXT:    leal (,%rax,8), %edx
 ; SSE2-NEXT:    andb $8, %dl
-; SSE2-NEXT:    orb %cl, %dl
+; SSE2-NEXT:    addb %cl, %dl
 ; SSE2-NEXT:    movl %eax, %ecx
 ; SSE2-NEXT:    shrb %cl
 ; SSE2-NEXT:    andb $2, %cl
-; SSE2-NEXT:    orb %dl, %cl
 ; SSE2-NEXT:    shrb $3, %al
-; SSE2-NEXT:    orb %cl, %al
+; SSE2-NEXT:    addb %cl, %al
+; SSE2-NEXT:    addb %dl, %al
 ; SSE2-NEXT:    # kill: def $al killed $al killed $rax
 ; SSE2-NEXT:    retq
 ;
@@ -67,13 +67,13 @@ define i8 @reverse_cmp_v8i1(<8 x i16> %a0, <8 x i16> %a1) {
 ; SSE2-NEXT:    shlb $2, %cl
 ; SSE2-NEXT:    shrb $2, %al
 ; SSE2-NEXT:    andb $51, %al
-; SSE2-NEXT:    orb %cl, %al
+; SSE2-NEXT:    addb %cl, %al
 ; SSE2-NEXT:    movl %eax, %ecx
 ; SSE2-NEXT:    andb $85, %cl
 ; SSE2-NEXT:    addb %cl, %cl
 ; SSE2-NEXT:    shrb %al
 ; SSE2-NEXT:    andb $85, %al
-; SSE2-NEXT:    orb %cl, %al
+; SSE2-NEXT:    addb %cl, %al
 ; SSE2-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-NEXT:    retq
 ;
@@ -123,7 +123,7 @@ define i16 @reverse_cmp_v16i1(<16 x i8> %a0, <16 x i8> %a1) {
 ; SSE2-NEXT:    shll $4, %ecx
 ; SSE2-NEXT:    shrl $4, %eax
 ; SSE2-NEXT:    andl $3855, %eax # imm = 0xF0F
-; SSE2-NEXT:    orl %ecx, %eax
+; SSE2-NEXT:    addl %ecx, %eax
 ; SSE2-NEXT:    movl %eax, %ecx
 ; SSE2-NEXT:    andl $13107, %ecx # imm = 0x3333
 ; SSE2-NEXT:    shrl $2, %eax
@@ -179,14 +179,14 @@ define i32 @reverse_cmp_v32i1(<32 x i8> %a0, <32 x i8> %a1) {
 ; SSE2-NEXT:    pcmpeqb %xmm3, %xmm1
 ; SSE2-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    bswapl %ecx
 ; SSE2-NEXT:    movl %ecx, %eax
 ; SSE2-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
 ; SSE2-NEXT:    shll $4, %eax
 ; SSE2-NEXT:    shrl $4, %ecx
 ; SSE2-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    movl %ecx, %eax
 ; SSE2-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; SSE2-NEXT:    shrl $2, %ecx
@@ -209,7 +209,7 @@ define i32 @reverse_cmp_v32i1(<32 x i8> %a0, <32 x i8> %a1) {
 ; SSE42-NEXT:    pshufb %xmm2, %xmm0
 ; SSE42-NEXT:    pmovmskb %xmm0, %eax
 ; SSE42-NEXT:    shll $16, %eax
-; SSE42-NEXT:    orl %ecx, %eax
+; SSE42-NEXT:    addl %ecx, %eax
 ; SSE42-NEXT:    retq
 ;
 ; AVX2-LABEL: reverse_cmp_v32i1:
@@ -257,15 +257,15 @@ define i64 @reverse_cmp_v64i1(<64 x i8> %a0, <64 x i8> %a1) {
 ; SSE2-NEXT:    pcmpeqb %xmm5, %xmm1
 ; SSE2-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE2-NEXT:    shll $16, %ecx
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    pcmpeqb %xmm6, %xmm2
 ; SSE2-NEXT:    pmovmskb %xmm2, %eax
 ; SSE2-NEXT:    pcmpeqb %xmm7, %xmm3
 ; SSE2-NEXT:    pmovmskb %xmm3, %edx
 ; SSE2-NEXT:    shll $16, %edx
-; SSE2-NEXT:    orl %eax, %edx
+; SSE2-NEXT:    addl %eax, %edx
 ; SSE2-NEXT:    shlq $32, %rdx
-; SSE2-NEXT:    orq %rcx, %rdx
+; SSE2-NEXT:    addq %rcx, %rdx
 ; SSE2-NEXT:    bswapq %rdx
 ; SSE2-NEXT:    movq %rdx, %rax
 ; SSE2-NEXT:    shrq $4, %rax
@@ -273,7 +273,7 @@ define i64 @reverse_cmp_v64i1(<64 x i8> %a0, <64 x i8> %a1) {
 ; SSE2-NEXT:    andq %rcx, %rax
 ; SSE2-NEXT:    andq %rcx, %rdx
 ; SSE2-NEXT:    shlq $4, %rdx
-; SSE2-NEXT:    orq %rax, %rdx
+; SSE2-NEXT:    addq %rax, %rdx
 ; SSE2-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; SSE2-NEXT:    movq %rdx, %rcx
 ; SSE2-NEXT:    andq %rax, %rcx
@@ -300,15 +300,15 @@ define i64 @reverse_cmp_v64i1(<64 x i8> %a0, <64 x i8> %a1) {
 ; SSE42-NEXT:    pshufb %xmm4, %xmm2
 ; SSE42-NEXT:    pmovmskb %xmm2, %ecx
 ; SSE42-NEXT:    shll $16, %ecx
-; SSE42-NEXT:    orl %eax, %ecx
+; SSE42-NEXT:    addl %eax, %ecx
 ; SSE42-NEXT:    pshufb %xmm4, %xmm1
 ; SSE42-NEXT:    pmovmskb %xmm1, %edx
 ; SSE42-NEXT:    pshufb %xmm4, %xmm0
 ; SSE42-NEXT:    pmovmskb %xmm0, %eax
 ; SSE42-NEXT:    shll $16, %eax
-; SSE42-NEXT:    orl %edx, %eax
+; SSE42-NEXT:    addl %edx, %eax
 ; SSE42-NEXT:    shlq $32, %rax
-; SSE42-NEXT:    orq %rcx, %rax
+; SSE42-NEXT:    addq %rcx, %rax
 ; SSE42-NEXT:    retq
 ;
 ; AVX2-LABEL: reverse_cmp_v64i1:
@@ -324,7 +324,7 @@ define i64 @reverse_cmp_v64i1(<64 x i8> %a0, <64 x i8> %a1) {
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX2-NEXT:    shlq $32, %rax
-; AVX2-NEXT:    orq %rcx, %rax
+; AVX2-NEXT:    addq %rcx, %rax
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/promote-vec3.ll b/llvm/test/CodeGen/X86/promote-vec3.ll
index f28a2ad0fd009a..71a2349ae5de2e 100644
--- a/llvm/test/CodeGen/X86/promote-vec3.ll
+++ b/llvm/test/CodeGen/X86/promote-vec3.ll
@@ -62,7 +62,7 @@ define <3 x i16> @sext_i8(<3 x i8>) {
 ; SSE3-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; SSE3-NEXT:    shll $24, %ecx
 ; SSE3-NEXT:    shll $8, %eax
-; SSE3-NEXT:    orl %ecx, %eax
+; SSE3-NEXT:    addl %ecx, %eax
 ; SSE3-NEXT:    movd %eax, %xmm0
 ; SSE3-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; SSE3-NEXT:    shll $8, %eax
diff --git a/llvm/test/CodeGen/X86/rev16.ll b/llvm/test/CodeGen/X86/rev16.ll
index ecb1970185c2db..b5b4b9a9512edb 100644
--- a/llvm/test/CodeGen/X86/rev16.ll
+++ b/llvm/test/CodeGen/X86/rev16.ll
@@ -35,7 +35,7 @@ define i32 @not_rev16(i32 %a) {
 ; X86-NEXT:    shrl $8, %ecx
 ; X86-NEXT:    andl $65280, %ecx # imm = 0xFF00
 ; X86-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: not_rev16:
@@ -45,7 +45,7 @@ define i32 @not_rev16(i32 %a) {
 ; X64-NEXT:    shrl $8, %edi
 ; X64-NEXT:    andl $65280, %edi # imm = 0xFF00
 ; X64-NEXT:    andl $16711680, %eax # imm = 0xFF0000
-; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
   %l8 = shl i32 %a, 8
   %r8 = lshr i32 %a, 8
@@ -123,7 +123,7 @@ define i32 @different_shift_amount(i32 %a) {
 ; X86-NEXT:    shrl $8, %eax
 ; X86-NEXT:    andl $-16712192, %ecx # imm = 0xFF00FE00
 ; X86-NEXT:    andl $16711935, %eax # imm = 0xFF00FF
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: different_shift_amount:
@@ -133,7 +133,7 @@ define i32 @different_shift_amount(i32 %a) {
 ; X64-NEXT:    shrl $8, %edi
 ; X64-NEXT:    andl $-16712192, %eax # imm = 0xFF00FE00
 ; X64-NEXT:    andl $16711935, %edi # imm = 0xFF00FF
-; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
   %l8 = shl i32 %a, 9
   %r8 = lshr i32 %a, 8
@@ -203,7 +203,7 @@ define i32 @different_vars(i32 %a, i32 %b) {
 ; X86-NEXT:    shrl $8, %eax
 ; X86-NEXT:    andl $-16711936, %ecx # imm = 0xFF00FF00
 ; X86-NEXT:    andl $16711935, %eax # imm = 0xFF00FF
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: different_vars:
diff --git a/llvm/test/CodeGen/X86/rotate-extract.ll b/llvm/test/CodeGen/X86/rotate-extract.ll
index 8f046a4f5aea5b..7ae9cf2d8cc14d 100644
--- a/llvm/test/CodeGen/X86/rotate-extract.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract.ll
@@ -141,7 +141,7 @@ define i64 @no_extract_shl(i64 %i) nounwind {
 ; X86-NEXT:    shll $10, %ecx
 ; X86-NEXT:    shrl $20, %eax
 ; X86-NEXT:    andl $127, %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: no_extract_shl:
@@ -150,7 +150,7 @@ define i64 @no_extract_shl(i64 %i) nounwind {
 ; X64-NEXT:    shlq $10, %rax
 ; X64-NEXT:    shrq $52, %rdi
 ; X64-NEXT:    andl $127, %edi
-; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    addq %rdi, %rax
 ; X64-NEXT:    retq
   %lhs_mul = shl i64 %i, 5
   %rhs_mul = shl i64 %i, 10
@@ -168,7 +168,7 @@ define i32 @no_extract_shrl(i32 %i) nounwind {
 ; X86-NEXT:    shrl $9, %ecx
 ; X86-NEXT:    andl $-8, %eax
 ; X86-NEXT:    shll $25, %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: no_extract_shrl:
@@ -177,7 +177,7 @@ define i32 @no_extract_shrl(i32 %i) nounwind {
 ; X64-NEXT:    shrl $9, %eax
 ; X64-NEXT:    andl $-8, %edi
 ; X64-NEXT:    shll $25, %edi
-; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    addl %edi, %eax
 ; X64-NEXT:    retq
   %lhs_div = lshr i32 %i, 3
   %rhs_div = lshr i32 %i, 9
@@ -196,7 +196,7 @@ define i16 @no_extract_mul(i16 %i) nounwind {
 ; X86-NEXT:    leal (%eax,%eax,8), %edx
 ; X86-NEXT:    movzwl %cx, %eax
 ; X86-NEXT:    shrl $9, %eax
-; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    addl %edx, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -209,7 +209,7 @@ define i16 @no_extract_mul(i16 %i) nounwind {
 ; X64-NEXT:    leal (%rdi,%rdi,8), %ecx
 ; X64-NEXT:    movzwl %ax, %eax
 ; X64-NEXT:    shrl $9, %eax
-; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
   %lhs_mul = mul i16 %i, 2304
@@ -231,7 +231,7 @@ define i8 @no_extract_udiv(i8 %i) nounwind {
 ; X86-NEXT:    addb %dh, %al
 ; X86-NEXT:    shrb $5, %al
 ; X86-NEXT:    shlb $3, %ch
-; X86-NEXT:    orb %al, %ch
+; X86-NEXT:    addb %al, %ch
 ; X86-NEXT:    andb $-9, %ch
 ; X86-NEXT:    movb %ch, %al
 ; X86-NEXT:    retl
@@ -248,7 +248,7 @@ define i8 @no_extract_udiv(i8 %i) nounwind {
 ; X64-NEXT:    addb %dl, %cl
 ; X64-NEXT:    shrb $5, %cl
 ; X64-NEXT:    shlb $3, %al
-; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    addb %cl, %al
 ; X64-NEXT:    andb $-9, %al
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index ca5558561a65b9..0934a2f4641da5 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -1275,14 +1275,14 @@ define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
 ; ATHLON:       ## %bb.0:
 ; ATHLON-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; ATHLON-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
-; ATHLON-NEXT:    orb $2, %cl
+; ATHLON-NEXT:    addb $2, %cl
 ; ATHLON-NEXT:    shll %cl, %eax
 ; ATHLON-NEXT:    retl
 ;
 ; MCU-LABEL: trunc_select_miscompile:
 ; MCU:       # %bb.0:
 ; MCU-NEXT:    movl %edx, %ecx
-; MCU-NEXT:    orb $2, %cl
+; MCU-NEXT:    addb $2, %cl
 ; MCU-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; MCU-NEXT:    shll %cl, %eax
 ; MCU-NEXT:    retl
diff --git a/llvm/test/CodeGen/X86/select_const.ll b/llvm/test/CodeGen/X86/select_const.ll
index d604923b48a11a..e00748af659423 100644
--- a/llvm/test/CodeGen/X86/select_const.ll
+++ b/llvm/test/CodeGen/X86/select_const.ll
@@ -550,7 +550,7 @@ define i8 @select_pow2_diff(i1 zeroext %cond) {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    shlb $4, %al
-; X86-NEXT:    orb $3, %al
+; X86-NEXT:    addb $3, %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: select_pow2_diff:
@@ -571,7 +571,7 @@ define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
 ; X86-NEXT:    xorb $1, %al
 ; X86-NEXT:    movzbl %al, %eax
 ; X86-NEXT:    shll $6, %eax
-; X86-NEXT:    orl $7, %eax
+; X86-NEXT:    addl $7, %eax
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
@@ -580,7 +580,7 @@ define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
 ; X64-NEXT:    xorb $1, %dil
 ; X64-NEXT:    movzbl %dil, %eax
 ; X64-NEXT:    shll $6, %eax
-; X64-NEXT:    orl $7, %eax
+; X64-NEXT:    addl $7, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
   %sel = select i1 %cond, i16 7, i16 71
diff --git a/llvm/test/CodeGen/X86/setcc-fsh.ll b/llvm/test/CodeGen/X86/setcc-fsh.ll
index 7ab63959f58b08..eb5cb89b3dddf6 100644
--- a/llvm/test/CodeGen/X86/setcc-fsh.ll
+++ b/llvm/test/CodeGen/X86/setcc-fsh.ll
@@ -172,7 +172,7 @@ define i1 @fshl_eq_n1(i8 %x, i8 %y, i8 %z) nounwind {
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    shll $8, %edi
 ; CHECK-NEXT:    movzbl %sil, %eax
-; CHECK-NEXT:    orl %edi, %eax
+; CHECK-NEXT:    addl %edi, %eax
 ; CHECK-NEXT:    andb $7, %cl
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; CHECK-NEXT:    shll %cl, %eax
diff --git a/llvm/test/CodeGen/X86/shrink-compare-pgso.ll b/llvm/test/CodeGen/X86/shrink-compare-pgso.ll
index 254b8fe3fc6e30..30e6cf1d5d0155 100644
--- a/llvm/test/CodeGen/X86/shrink-compare-pgso.ll
+++ b/llvm/test/CodeGen/X86/shrink-compare-pgso.ll
@@ -101,7 +101,7 @@ define dso_local void @test5(i32 %X) nounwind !prof !14 {
 ; CHECK-NEXT:    movzbl x+6(%rip), %eax
 ; CHECK-NEXT:    shll $16, %eax
 ; CHECK-NEXT:    movzwl x+4(%rip), %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    cmpl $1, %ecx
 ; CHECK-NEXT:    jne bar # TAILCALL
 ; CHECK-NEXT:  # %bb.1: # %if.end
diff --git a/llvm/test/CodeGen/X86/shrink-compare.ll b/llvm/test/CodeGen/X86/shrink-compare.ll
index 840167ff9f4a0c..0e5ef2a0ff52dc 100644
--- a/llvm/test/CodeGen/X86/shrink-compare.ll
+++ b/llvm/test/CodeGen/X86/shrink-compare.ll
@@ -101,7 +101,7 @@ define dso_local void @test5(i32 %X) nounwind minsize {
 ; CHECK-NEXT:    movzbl x+6(%rip), %eax
 ; CHECK-NEXT:    shll $16, %eax
 ; CHECK-NEXT:    movzwl x+4(%rip), %ecx
-; CHECK-NEXT:    orl %eax, %ecx
+; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    cmpl $1, %ecx
 ; CHECK-NEXT:    jne bar # TAILCALL
 ; CHECK-NEXT:  # %bb.1: # %if.end
diff --git a/llvm/test/CodeGen/X86/smul_fix.ll b/llvm/test/CodeGen/X86/smul_fix.ll
index ce56283df6010b..4ccab2540042e1 100644
--- a/llvm/test/CodeGen/X86/smul_fix.ll
+++ b/llvm/test/CodeGen/X86/smul_fix.ll
@@ -108,7 +108,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X64-NEXT:    shrb $2, %cl
 ; X64-NEXT:    shrl $8, %eax
 ; X64-NEXT:    shlb $6, %al
-; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    addb %cl, %al
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 ;
@@ -125,7 +125,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X86-NEXT:    imull %ecx, %eax
 ; X86-NEXT:    shlb $6, %ah
 ; X86-NEXT:    shrb $2, %al
-; X86-NEXT:    orb %ah, %al
+; X86-NEXT:    addb %ah, %al
 ; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
   %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
diff --git a/llvm/test/CodeGen/X86/smul_fix_sat.ll b/llvm/test/CodeGen/X86/smul_fix_sat.ll
index 85c966c447fad6..104e2e64fcf164 100644
--- a/llvm/test/CodeGen/X86/smul_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/smul_fix_sat.ll
@@ -150,7 +150,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X64-NEXT:    shrl $8, %ecx
 ; X64-NEXT:    movl %ecx, %edx
 ; X64-NEXT:    shlb $6, %dl
-; X64-NEXT:    orb %al, %dl
+; X64-NEXT:    addb %al, %dl
 ; X64-NEXT:    movzbl %dl, %eax
 ; X64-NEXT:    cmpb $2, %cl
 ; X64-NEXT:    movl $127, %edx
@@ -175,7 +175,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X86-NEXT:    movb %ah, %cl
 ; X86-NEXT:    shlb $6, %cl
 ; X86-NEXT:    shrb $2, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    movzbl %al, %ecx
 ; X86-NEXT:    cmpb $2, %ah
 ; X86-NEXT:    movl $127, %edx
diff --git a/llvm/test/CodeGen/X86/split-store.ll b/llvm/test/CodeGen/X86/split-store.ll
index 416c0cbeeddbd1..d63a541c9f17f9 100644
--- a/llvm/test/CodeGen/X86/split-store.ll
+++ b/llvm/test/CodeGen/X86/split-store.ll
@@ -179,7 +179,7 @@ define void @int12_int12_pair(i12 signext %tmp1, i12 signext %tmp2, ptr %ref.tmp
 ; CHECK-NEXT:    movl %esi, %eax
 ; CHECK-NEXT:    shll $12, %eax
 ; CHECK-NEXT:    andl $4095, %edi # imm = 0xFFF
-; CHECK-NEXT:    orl %eax, %edi
+; CHECK-NEXT:    addl %eax, %edi
 ; CHECK-NEXT:    shrl $4, %esi
 ; CHECK-NEXT:    movb %sil, 2(%rdx)
 ; CHECK-NEXT:    movw %di, (%rdx)
@@ -199,7 +199,7 @@ define void @int7_int7_pair(i7 signext %tmp1, i7 signext %tmp2, ptr %ref.tmp) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    shll $7, %esi
 ; CHECK-NEXT:    andl $127, %edi
-; CHECK-NEXT:    orl %esi, %edi
+; CHECK-NEXT:    addl %esi, %edi
 ; CHECK-NEXT:    andl $16383, %edi # imm = 0x3FFF
 ; CHECK-NEXT:    movw %di, (%rdx)
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
index 3dde5c1c8a40c1..a45207d6e0e3f9 100644
--- a/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
@@ -2271,7 +2271,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
 ; CHECK-SSE2-NEXT:    pmovmskb %xmm2, %ecx
 ; CHECK-SSE2-NEXT:    pmovmskb %xmm3, %edx
 ; CHECK-SSE2-NEXT:    shll $16, %edx
-; CHECK-SSE2-NEXT:    orl %ecx, %edx
+; CHECK-SSE2-NEXT:    addl %ecx, %edx
 ; CHECK-SSE2-NEXT:    movl %edx, (%rdi)
 ; CHECK-SSE2-NEXT:    retq
 ;
@@ -2333,7 +2333,7 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
 ; CHECK-SSE41-NEXT:    pmovmskb %xmm2, %ecx
 ; CHECK-SSE41-NEXT:    pmovmskb %xmm3, %edx
 ; CHECK-SSE41-NEXT:    shll $16, %edx
-; CHECK-SSE41-NEXT:    orl %ecx, %edx
+; CHECK-SSE41-NEXT:    addl %ecx, %edx
 ; CHECK-SSE41-NEXT:    movl %edx, (%rdi)
 ; CHECK-SSE41-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
index e0f438eb7cc8f7..ec764a88ccf474 100644
--- a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
+++ b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
@@ -2394,7 +2394,7 @@ define void @vec384_v3i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.p
 ; SCALAR-NEXT:    notb %dil
 ; SCALAR-NEXT:    movzbl %dil, %ecx
 ; SCALAR-NEXT:    shll $8, %ecx
-; SCALAR-NEXT:    orl %r8d, %ecx
+; SCALAR-NEXT:    addl %r8d, %ecx
 ; SCALAR-NEXT:    notb %al
 ; SCALAR-NEXT:    movb %al, 2(%rsi)
 ; SCALAR-NEXT:    movw %cx, (%rsi)
@@ -3065,7 +3065,7 @@ define void @vec384_v3i32(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notl %edi
 ; SCALAR-NEXT:    shlq $32, %rdi
 ; SCALAR-NEXT:    notl %ecx
-; SCALAR-NEXT:    orq %rdi, %rcx
+; SCALAR-NEXT:    addq %rdi, %rcx
 ; SCALAR-NEXT:    notl %eax
 ; SCALAR-NEXT:    movl %eax, 8(%rsi)
 ; SCALAR-NEXT:    movq %rcx, (%rsi)
@@ -3201,7 +3201,7 @@ define void @vec384_v3f32(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notl %edi
 ; SCALAR-NEXT:    shlq $32, %rdi
 ; SCALAR-NEXT:    notl %ecx
-; SCALAR-NEXT:    orq %rdi, %rcx
+; SCALAR-NEXT:    addq %rdi, %rcx
 ; SCALAR-NEXT:    notl %eax
 ; SCALAR-NEXT:    movl %eax, 8(%rsi)
 ; SCALAR-NEXT:    movq %rcx, (%rsi)
@@ -3810,23 +3810,23 @@ define void @vec384_v6i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.p
 ; SCALAR-NEXT:    notb %r10b
 ; SCALAR-NEXT:    movzbl %r10b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %edi, %r10d
+; SCALAR-NEXT:    addl %edi, %r10d
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %edi
 ; SCALAR-NEXT:    notb %r8b
 ; SCALAR-NEXT:    movzbl %r8b, %r8d
 ; SCALAR-NEXT:    shll $8, %r8d
-; SCALAR-NEXT:    orl %edi, %r8d
+; SCALAR-NEXT:    addl %edi, %r8d
 ; SCALAR-NEXT:    notb %cl
 ; SCALAR-NEXT:    movzbl %cl, %ecx
 ; SCALAR-NEXT:    notb %al
 ; SCALAR-NEXT:    movzbl %al, %eax
 ; SCALAR-NEXT:    shll $8, %eax
-; SCALAR-NEXT:    orl %ecx, %eax
+; SCALAR-NEXT:    addl %ecx, %eax
 ; SCALAR-NEXT:    movw %ax, 4(%rsi)
 ; SCALAR-NEXT:    shll $16, %r8d
 ; SCALAR-NEXT:    movzwl %r10w, %ecx
-; SCALAR-NEXT:    orl %r8d, %ecx
+; SCALAR-NEXT:    addl %r8d, %ecx
 ; SCALAR-NEXT:    movl %ecx, (%rsi)
 ; SCALAR-NEXT:    movw %ax, 4(%rdx)
 ; SCALAR-NEXT:    movl %ecx, (%rdx)
@@ -4082,12 +4082,12 @@ define void @vec384_v6i16(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    shll $16, %r8d
 ; SCALAR-NEXT:    notl %edi
 ; SCALAR-NEXT:    movzwl %di, %edi
-; SCALAR-NEXT:    orl %r8d, %edi
+; SCALAR-NEXT:    addl %r8d, %edi
 ; SCALAR-NEXT:    notl %ecx
 ; SCALAR-NEXT:    notl %eax
 ; SCALAR-NEXT:    movl %eax, 8(%rsi)
 ; SCALAR-NEXT:    shlq $32, %rdi
-; SCALAR-NEXT:    orq %rdi, %rcx
+; SCALAR-NEXT:    addq %rdi, %rcx
 ; SCALAR-NEXT:    movq %rcx, (%rsi)
 ; SCALAR-NEXT:    movl %eax, 8(%rdx)
 ; SCALAR-NEXT:    movq %rcx, (%rdx)
@@ -4226,15 +4226,15 @@ define void @vec384_v6i32(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notl %r10d
 ; SCALAR-NEXT:    shlq $32, %r10
 ; SCALAR-NEXT:    notl %edi
-; SCALAR-NEXT:    orq %r10, %rdi
+; SCALAR-NEXT:    addq %r10, %rdi
 ; SCALAR-NEXT:    notl %r9d
 ; SCALAR-NEXT:    shlq $32, %r9
 ; SCALAR-NEXT:    notl %ecx
-; SCALAR-NEXT:    orq %r9, %rcx
+; SCALAR-NEXT:    addq %r9, %rcx
 ; SCALAR-NEXT:    notl %r8d
 ; SCALAR-NEXT:    shlq $32, %r8
 ; SCALAR-NEXT:    notl %eax
-; SCALAR-NEXT:    orq %r8, %rax
+; SCALAR-NEXT:    addq %r8, %rax
 ; SCALAR-NEXT:    movq %rax, (%rsi)
 ; SCALAR-NEXT:    movq %rcx, 8(%rsi)
 ; SCALAR-NEXT:    movq %rdi, 16(%rsi)
@@ -4313,15 +4313,15 @@ define void @vec384_v6f32(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notl %r10d
 ; SCALAR-NEXT:    shlq $32, %r10
 ; SCALAR-NEXT:    notl %edi
-; SCALAR-NEXT:    orq %r10, %rdi
+; SCALAR-NEXT:    addq %r10, %rdi
 ; SCALAR-NEXT:    notl %r9d
 ; SCALAR-NEXT:    shlq $32, %r9
 ; SCALAR-NEXT:    notl %ecx
-; SCALAR-NEXT:    orq %r9, %rcx
+; SCALAR-NEXT:    addq %r9, %rcx
 ; SCALAR-NEXT:    notl %r8d
 ; SCALAR-NEXT:    shlq $32, %r8
 ; SCALAR-NEXT:    notl %eax
-; SCALAR-NEXT:    orq %r8, %rax
+; SCALAR-NEXT:    addq %r8, %rax
 ; SCALAR-NEXT:    movq %rax, (%rsi)
 ; SCALAR-NEXT:    movq %rcx, 8(%rsi)
 ; SCALAR-NEXT:    movq %rdi, 16(%rsi)
@@ -4651,49 +4651,49 @@ define void @vec384_v12i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r15b
 ; SCALAR-NEXT:    movzbl %r15b, %r15d
 ; SCALAR-NEXT:    shll $8, %r15d
-; SCALAR-NEXT:    orl %r12d, %r15d
+; SCALAR-NEXT:    addl %r12d, %r15d
 ; SCALAR-NEXT:    shll $16, %r15d
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r9d
 ; SCALAR-NEXT:    notb %bpl
 ; SCALAR-NEXT:    movzbl %bpl, %ebp
 ; SCALAR-NEXT:    shll $8, %ebp
-; SCALAR-NEXT:    orl %r9d, %ebp
+; SCALAR-NEXT:    addl %r9d, %ebp
 ; SCALAR-NEXT:    movzwl %bp, %r9d
-; SCALAR-NEXT:    orl %r15d, %r9d
+; SCALAR-NEXT:    addl %r15d, %r9d
 ; SCALAR-NEXT:    notb %r14b
 ; SCALAR-NEXT:    movzbl %r14b, %ebp
 ; SCALAR-NEXT:    notb %bl
 ; SCALAR-NEXT:    movzbl %bl, %ebx
 ; SCALAR-NEXT:    shll $8, %ebx
-; SCALAR-NEXT:    orl %ebp, %ebx
+; SCALAR-NEXT:    addl %ebp, %ebx
 ; SCALAR-NEXT:    shll $16, %ebx
 ; SCALAR-NEXT:    notb %r11b
 ; SCALAR-NEXT:    movzbl %r11b, %r11d
 ; SCALAR-NEXT:    notb %r10b
 ; SCALAR-NEXT:    movzbl %r10b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r11d, %r10d
+; SCALAR-NEXT:    addl %r11d, %r10d
 ; SCALAR-NEXT:    movzwl %r10w, %r10d
-; SCALAR-NEXT:    orl %ebx, %r10d
+; SCALAR-NEXT:    addl %ebx, %r10d
 ; SCALAR-NEXT:    notb %r8b
 ; SCALAR-NEXT:    movzbl %r8b, %r8d
 ; SCALAR-NEXT:    notb %dil
 ; SCALAR-NEXT:    movzbl %dil, %edi
 ; SCALAR-NEXT:    shll $8, %edi
-; SCALAR-NEXT:    orl %r8d, %edi
+; SCALAR-NEXT:    addl %r8d, %edi
 ; SCALAR-NEXT:    shll $16, %edi
 ; SCALAR-NEXT:    notb %cl
 ; SCALAR-NEXT:    movzbl %cl, %ecx
 ; SCALAR-NEXT:    notb %al
 ; SCALAR-NEXT:    movzbl %al, %eax
 ; SCALAR-NEXT:    shll $8, %eax
-; SCALAR-NEXT:    orl %ecx, %eax
+; SCALAR-NEXT:    addl %ecx, %eax
 ; SCALAR-NEXT:    movzwl %ax, %eax
-; SCALAR-NEXT:    orl %edi, %eax
+; SCALAR-NEXT:    addl %edi, %eax
 ; SCALAR-NEXT:    movl %eax, 8(%rsi)
 ; SCALAR-NEXT:    shlq $32, %r10
-; SCALAR-NEXT:    orq %r10, %r9
+; SCALAR-NEXT:    addq %r10, %r9
 ; SCALAR-NEXT:    movq %r9, (%rsi)
 ; SCALAR-NEXT:    movl %eax, 8(%rdx)
 ; SCALAR-NEXT:    movq %r9, (%rdx)
@@ -4846,26 +4846,26 @@ define void @vec384_v12i16(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec
 ; SCALAR-NEXT:    shll $16, %r14d
 ; SCALAR-NEXT:    notl %ebx
 ; SCALAR-NEXT:    movzwl %bx, %ebx
-; SCALAR-NEXT:    orl %r14d, %ebx
+; SCALAR-NEXT:    addl %r14d, %ebx
 ; SCALAR-NEXT:    shlq $32, %rbx
 ; SCALAR-NEXT:    notl %edi
-; SCALAR-NEXT:    orq %rbx, %rdi
+; SCALAR-NEXT:    addq %rbx, %rdi
 ; SCALAR-NEXT:    notl %r11d
 ; SCALAR-NEXT:    shll $16, %r11d
 ; SCALAR-NEXT:    notl %r10d
 ; SCALAR-NEXT:    movzwl %r10w, %r10d
-; SCALAR-NEXT:    orl %r11d, %r10d
+; SCALAR-NEXT:    addl %r11d, %r10d
 ; SCALAR-NEXT:    shlq $32, %r10
 ; SCALAR-NEXT:    notl %ecx
-; SCALAR-NEXT:    orq %r10, %rcx
+; SCALAR-NEXT:    addq %r10, %rcx
 ; SCALAR-NEXT:    notl %r9d
 ; SCALAR-NEXT:    shll $16, %r9d
 ; SCALAR-NEXT:    notl %r8d
 ; SCALAR-NEXT:    movzwl %r8w, %r8d
-; SCALAR-NEXT:    orl %r9d, %r8d
+; SCALAR-NEXT:    addl %r9d, %r8d
 ; SCALAR-NEXT:    shlq $32, %r8
 ; SCALAR-NEXT:    notl %eax
-; SCALAR-NEXT:    orq %r8, %rax
+; SCALAR-NEXT:    addq %r8, %rax
 ; SCALAR-NEXT:    movq %rax, (%rsi)
 ; SCALAR-NEXT:    movq %rcx, 8(%rsi)
 ; SCALAR-NEXT:    movq %rdi, 16(%rsi)
@@ -5136,7 +5136,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r9d
 ; SCALAR-NEXT:    shll $8, %r9d
-; SCALAR-NEXT:    orl %r10d, %r9d
+; SCALAR-NEXT:    addl %r10d, %r9d
 ; SCALAR-NEXT:    movq %rdi, %r10
 ; SCALAR-NEXT:    shrq $32, %r10
 ; SCALAR-NEXT:    notb %r10b
@@ -5144,12 +5144,12 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r8b
 ; SCALAR-NEXT:    movzbl %r8b, %r8d
 ; SCALAR-NEXT:    shll $8, %r8d
-; SCALAR-NEXT:    orl %r10d, %r8d
+; SCALAR-NEXT:    addl %r10d, %r8d
 ; SCALAR-NEXT:    movl %edi, %r10d
 ; SCALAR-NEXT:    shrl $24, %r10d
 ; SCALAR-NEXT:    shll $16, %r9d
 ; SCALAR-NEXT:    movzwl %r8w, %r8d
-; SCALAR-NEXT:    orl %r9d, %r8d
+; SCALAR-NEXT:    addl %r9d, %r8d
 ; SCALAR-NEXT:    movl %edi, %r9d
 ; SCALAR-NEXT:    shrl $16, %r9d
 ; SCALAR-NEXT:    notb %r9b
@@ -5157,7 +5157,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r10b
 ; SCALAR-NEXT:    movzbl %r10b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r9d, %r10d
+; SCALAR-NEXT:    addl %r9d, %r10d
 ; SCALAR-NEXT:    movl %edi, %r9d
 ; SCALAR-NEXT:    shrl $8, %r9d
 ; SCALAR-NEXT:    notb %dil
@@ -5165,16 +5165,16 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r11d
 ; SCALAR-NEXT:    shll $8, %r11d
-; SCALAR-NEXT:    orl %edi, %r11d
+; SCALAR-NEXT:    addl %edi, %r11d
 ; SCALAR-NEXT:    movq %rcx, %r9
 ; SCALAR-NEXT:    shrq $40, %r9
 ; SCALAR-NEXT:    shll $16, %r10d
 ; SCALAR-NEXT:    movzwl %r11w, %edi
-; SCALAR-NEXT:    orl %r10d, %edi
+; SCALAR-NEXT:    addl %r10d, %edi
 ; SCALAR-NEXT:    movq %rcx, %r10
 ; SCALAR-NEXT:    shrq $56, %r10
 ; SCALAR-NEXT:    shlq $32, %r8
-; SCALAR-NEXT:    orq %r8, %rdi
+; SCALAR-NEXT:    addq %r8, %rdi
 ; SCALAR-NEXT:    movq %rcx, %r8
 ; SCALAR-NEXT:    shrq $48, %r8
 ; SCALAR-NEXT:    notb %r8b
@@ -5182,7 +5182,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r10b
 ; SCALAR-NEXT:    movzbl %r10b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r8d, %r10d
+; SCALAR-NEXT:    addl %r8d, %r10d
 ; SCALAR-NEXT:    movq %rcx, %r8
 ; SCALAR-NEXT:    shrq $32, %r8
 ; SCALAR-NEXT:    notb %r8b
@@ -5190,12 +5190,12 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r9d
 ; SCALAR-NEXT:    shll $8, %r9d
-; SCALAR-NEXT:    orl %r8d, %r9d
+; SCALAR-NEXT:    addl %r8d, %r9d
 ; SCALAR-NEXT:    movl %ecx, %r11d
 ; SCALAR-NEXT:    shrl $24, %r11d
 ; SCALAR-NEXT:    shll $16, %r10d
 ; SCALAR-NEXT:    movzwl %r9w, %r8d
-; SCALAR-NEXT:    orl %r10d, %r8d
+; SCALAR-NEXT:    addl %r10d, %r8d
 ; SCALAR-NEXT:    movl %ecx, %r9d
 ; SCALAR-NEXT:    shrl $16, %r9d
 ; SCALAR-NEXT:    notb %r9b
@@ -5203,7 +5203,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r11b
 ; SCALAR-NEXT:    movzbl %r11b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r9d, %r10d
+; SCALAR-NEXT:    addl %r9d, %r10d
 ; SCALAR-NEXT:    movl %ecx, %r9d
 ; SCALAR-NEXT:    shrl $8, %r9d
 ; SCALAR-NEXT:    notb %cl
@@ -5211,16 +5211,16 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r11d
 ; SCALAR-NEXT:    shll $8, %r11d
-; SCALAR-NEXT:    orl %ecx, %r11d
+; SCALAR-NEXT:    addl %ecx, %r11d
 ; SCALAR-NEXT:    movq %rax, %r9
 ; SCALAR-NEXT:    shrq $40, %r9
 ; SCALAR-NEXT:    shll $16, %r10d
 ; SCALAR-NEXT:    movzwl %r11w, %ecx
-; SCALAR-NEXT:    orl %r10d, %ecx
+; SCALAR-NEXT:    addl %r10d, %ecx
 ; SCALAR-NEXT:    movq %rax, %r10
 ; SCALAR-NEXT:    shrq $56, %r10
 ; SCALAR-NEXT:    shlq $32, %r8
-; SCALAR-NEXT:    orq %r8, %rcx
+; SCALAR-NEXT:    addq %r8, %rcx
 ; SCALAR-NEXT:    movq %rax, %r8
 ; SCALAR-NEXT:    shrq $48, %r8
 ; SCALAR-NEXT:    notb %r8b
@@ -5228,7 +5228,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r10b
 ; SCALAR-NEXT:    movzbl %r10b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r8d, %r10d
+; SCALAR-NEXT:    addl %r8d, %r10d
 ; SCALAR-NEXT:    movq %rax, %r8
 ; SCALAR-NEXT:    shrq $32, %r8
 ; SCALAR-NEXT:    notb %r8b
@@ -5236,12 +5236,12 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r9d
 ; SCALAR-NEXT:    shll $8, %r9d
-; SCALAR-NEXT:    orl %r8d, %r9d
+; SCALAR-NEXT:    addl %r8d, %r9d
 ; SCALAR-NEXT:    movl %eax, %r11d
 ; SCALAR-NEXT:    shrl $24, %r11d
 ; SCALAR-NEXT:    shll $16, %r10d
 ; SCALAR-NEXT:    movzwl %r9w, %r8d
-; SCALAR-NEXT:    orl %r10d, %r8d
+; SCALAR-NEXT:    addl %r10d, %r8d
 ; SCALAR-NEXT:    movl %eax, %r9d
 ; SCALAR-NEXT:    shrl $16, %r9d
 ; SCALAR-NEXT:    notb %r9b
@@ -5249,7 +5249,7 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r11b
 ; SCALAR-NEXT:    movzbl %r11b, %r10d
 ; SCALAR-NEXT:    shll $8, %r10d
-; SCALAR-NEXT:    orl %r9d, %r10d
+; SCALAR-NEXT:    addl %r9d, %r10d
 ; SCALAR-NEXT:    movl %eax, %r9d
 ; SCALAR-NEXT:    shrl $8, %r9d
 ; SCALAR-NEXT:    notb %al
@@ -5257,12 +5257,12 @@ define void @vec384_v24i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.
 ; SCALAR-NEXT:    notb %r9b
 ; SCALAR-NEXT:    movzbl %r9b, %r9d
 ; SCALAR-NEXT:    shll $8, %r9d
-; SCALAR-NEXT:    orl %eax, %r9d
+; SCALAR-NEXT:    addl %eax, %r9d
 ; SCALAR-NEXT:    shll $16, %r10d
 ; SCALAR-NEXT:    movzwl %r9w, %eax
-; SCALAR-NEXT:    orl %r10d, %eax
+; SCALAR-NEXT:    addl %r10d, %eax
 ; SCALAR-NEXT:    shlq $32, %r8
-; SCALAR-NEXT:    orq %r8, %rax
+; SCALAR-NEXT:    addq %r8, %rax
 ; SCALAR-NEXT:    movq %rax, (%rsi)
 ; SCALAR-NEXT:    movq %rcx, 8(%rsi)
 ; SCALAR-NEXT:    movq %rdi, 16(%rsi)
diff --git a/llvm/test/CodeGen/X86/umul_fix_sat.ll b/llvm/test/CodeGen/X86/umul_fix_sat.ll
index 8c7078c7263284..df89b33d81ab42 100644
--- a/llvm/test/CodeGen/X86/umul_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/umul_fix_sat.ll
@@ -102,7 +102,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X64-NEXT:    shrl $8, %eax
 ; X64-NEXT:    movl %eax, %edx
 ; X64-NEXT:    shlb $6, %dl
-; X64-NEXT:    orb %cl, %dl
+; X64-NEXT:    addb %cl, %dl
 ; X64-NEXT:    movzbl %dl, %ecx
 ; X64-NEXT:    cmpb $4, %al
 ; X64-NEXT:    movl $255, %eax
@@ -123,7 +123,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; X86-NEXT:    movb %ah, %cl
 ; X86-NEXT:    shlb $6, %cl
 ; X86-NEXT:    shrb $2, %al
-; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    addb %cl, %al
 ; X86-NEXT:    movzbl %al, %ecx
 ; X86-NEXT:    cmpb $4, %ah
 ; X86-NEXT:    movl $255, %eax
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-innerouter.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-innerouter.ll
index 9a8719f9a64fa5..f071c752ea9727 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-innerouter.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-innerouter.ll
@@ -90,7 +90,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-NOBMI-NEXT:    andq %rdi, %rcx
 ; CHECK-NOBMI-NEXT:    movabsq $-281474976645121, %rax # imm = 0xFFFF00000000FFFF
 ; CHECK-NOBMI-NEXT:    andq %rsi, %rax
-; CHECK-NOBMI-NEXT:    orq %rcx, %rax
+; CHECK-NOBMI-NEXT:    addq %rcx, %rax
 ; CHECK-NOBMI-NEXT:    retq
 ;
 ; CHECK-BMI-LABEL: out64_constmask:
@@ -99,7 +99,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-BMI-NEXT:    andq %rdi, %rcx
 ; CHECK-BMI-NEXT:    movabsq $-281474976645121, %rax # imm = 0xFFFF00000000FFFF
 ; CHECK-BMI-NEXT:    andq %rsi, %rax
-; CHECK-BMI-NEXT:    orq %rcx, %rax
+; CHECK-BMI-NEXT:    addq %rcx, %rax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i64 %x, 281474976645120
   %my = and i64 %y, -281474976645121
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbits.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbits.ll
index c4c4e5ed1fddeb..6a285937d17e0f 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbits.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbits.ll
@@ -90,7 +90,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-NOBMI-NEXT:    andq %rdi, %rcx
 ; CHECK-NOBMI-NEXT:    movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
 ; CHECK-NOBMI-NEXT:    andq %rsi, %rax
-; CHECK-NOBMI-NEXT:    orq %rcx, %rax
+; CHECK-NOBMI-NEXT:    addq %rcx, %rax
 ; CHECK-NOBMI-NEXT:    retq
 ;
 ; CHECK-BMI-LABEL: out64_constmask:
@@ -99,7 +99,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-BMI-NEXT:    andq %rdi, %rcx
 ; CHECK-BMI-NEXT:    movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
 ; CHECK-BMI-NEXT:    andq %rsi, %rax
-; CHECK-BMI-NEXT:    orq %rcx, %rax
+; CHECK-BMI-NEXT:    addq %rcx, %rax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i64 %x, 6148914691236517205
   %my = and i64 %y, -6148914691236517206
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbytehalves.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbytehalves.ll
index 2ea74f39423872..9ae97a14643830 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbytehalves.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-interleavedbytehalves.ll
@@ -90,7 +90,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-NOBMI-NEXT:    andq %rdi, %rcx
 ; CHECK-NOBMI-NEXT:    movabsq $-1085102592571150096, %rax # imm = 0xF0F0F0F0F0F0F0F0
 ; CHECK-NOBMI-NEXT:    andq %rsi, %rax
-; CHECK-NOBMI-NEXT:    orq %rcx, %rax
+; CHECK-NOBMI-NEXT:    addq %rcx, %rax
 ; CHECK-NOBMI-NEXT:    retq
 ;
 ; CHECK-BMI-LABEL: out64_constmask:
@@ -99,7 +99,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-BMI-NEXT:    andq %rdi, %rcx
 ; CHECK-BMI-NEXT:    movabsq $-1085102592571150096, %rax # imm = 0xF0F0F0F0F0F0F0F0
 ; CHECK-BMI-NEXT:    andq %rsi, %rax
-; CHECK-BMI-NEXT:    orq %rcx, %rax
+; CHECK-BMI-NEXT:    addq %rcx, %rax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i64 %x, 1085102592571150095
   %my = and i64 %y, -1085102592571150096
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll
index eb6accd3e623b0..6a1cdae674ddeb 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-constmask-lowhigh.ll
@@ -38,7 +38,7 @@ define i16 @out16_constmask(i16 %x, i16 %y) {
 ; CHECK-NOBMI:       # %bb.0:
 ; CHECK-NOBMI-NEXT:    movzbl %dil, %eax
 ; CHECK-NOBMI-NEXT:    andl $-256, %esi
-; CHECK-NOBMI-NEXT:    orl %esi, %eax
+; CHECK-NOBMI-NEXT:    addl %esi, %eax
 ; CHECK-NOBMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NOBMI-NEXT:    retq
 ;
@@ -46,7 +46,7 @@ define i16 @out16_constmask(i16 %x, i16 %y) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    movzbl %dil, %eax
 ; CHECK-BMI-NEXT:    andl $-256, %esi
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i16 %x, 255
@@ -60,14 +60,14 @@ define i32 @out32_constmask(i32 %x, i32 %y) {
 ; CHECK-NOBMI:       # %bb.0:
 ; CHECK-NOBMI-NEXT:    movzwl %di, %eax
 ; CHECK-NOBMI-NEXT:    andl $-65536, %esi # imm = 0xFFFF0000
-; CHECK-NOBMI-NEXT:    orl %esi, %eax
+; CHECK-NOBMI-NEXT:    addl %esi, %eax
 ; CHECK-NOBMI-NEXT:    retq
 ;
 ; CHECK-BMI-LABEL: out32_constmask:
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    movzwl %di, %eax
 ; CHECK-BMI-NEXT:    andl $-65536, %esi # imm = 0xFFFF0000
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i32 %x, 65535
   %my = and i32 %y, -65536
@@ -81,7 +81,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-NOBMI-NEXT:    movl %edi, %ecx
 ; CHECK-NOBMI-NEXT:    movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
 ; CHECK-NOBMI-NEXT:    andq %rsi, %rax
-; CHECK-NOBMI-NEXT:    orq %rcx, %rax
+; CHECK-NOBMI-NEXT:    addq %rcx, %rax
 ; CHECK-NOBMI-NEXT:    retq
 ;
 ; CHECK-BMI-LABEL: out64_constmask:
@@ -89,7 +89,7 @@ define i64 @out64_constmask(i64 %x, i64 %y) {
 ; CHECK-BMI-NEXT:    movl %edi, %ecx
 ; CHECK-BMI-NEXT:    movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
 ; CHECK-BMI-NEXT:    andq %rsi, %rax
-; CHECK-BMI-NEXT:    orq %rcx, %rax
+; CHECK-BMI-NEXT:    addq %rcx, %rax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i64 %x, 4294967295
   %my = and i64 %y, -4294967296
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-variablemask.ll
index 9c9d06921096cb..0e7cd0d14c13c2 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-variablemask.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-scalar-variablemask.ll
@@ -10,7 +10,7 @@ define i8 @out8(i8 %x, i8 %y, i8 %mask) {
 ; CHECK-NOBMI-NEXT:    andl %edx, %edi
 ; CHECK-NOBMI-NEXT:    notb %al
 ; CHECK-NOBMI-NEXT:    andb %sil, %al
-; CHECK-NOBMI-NEXT:    orb %dil, %al
+; CHECK-NOBMI-NEXT:    addb %dil, %al
 ; CHECK-NOBMI-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NOBMI-NEXT:    retq
 ;
@@ -20,7 +20,7 @@ define i8 @out8(i8 %x, i8 %y, i8 %mask) {
 ; CHECK-BMI-NEXT:    andl %edx, %edi
 ; CHECK-BMI-NEXT:    notb %al
 ; CHECK-BMI-NEXT:    andb %sil, %al
-; CHECK-BMI-NEXT:    orb %dil, %al
+; CHECK-BMI-NEXT:    addb %dil, %al
 ; CHECK-BMI-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i8 %x, %mask
@@ -33,11 +33,12 @@ define i8 @out8(i8 %x, i8 %y, i8 %mask) {
 define i16 @out16(i16 %x, i16 %y, i16 %mask) {
 ; CHECK-NOBMI-LABEL: out16:
 ; CHECK-NOBMI:       # %bb.0:
-; CHECK-NOBMI-NEXT:    movl %edx, %eax
+; CHECK-NOBMI-NEXT:    # kill: def $edx killed $edx def $rdx
+; CHECK-NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
 ; CHECK-NOBMI-NEXT:    andl %edx, %edi
-; CHECK-NOBMI-NEXT:    notl %eax
-; CHECK-NOBMI-NEXT:    andl %esi, %eax
-; CHECK-NOBMI-NEXT:    orl %edi, %eax
+; CHECK-NOBMI-NEXT:    notl %edx
+; CHECK-NOBMI-NEXT:    andl %esi, %edx
+; CHECK-NOBMI-NEXT:    leal (%rdx,%rdi), %eax
 ; CHECK-NOBMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NOBMI-NEXT:    retq
 ;
@@ -45,7 +46,7 @@ define i16 @out16(i16 %x, i16 %y, i16 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andl %edx, %edi
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i16 %x, %mask
@@ -68,7 +69,7 @@ define i32 @out32(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andl %edx, %edi
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i32 %x, %mask
   %notmask = xor i32 %mask, -1
@@ -90,7 +91,7 @@ define i64 @out64(i64 %x, i64 %y, i64 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andq %rdx, %rdi
 ; CHECK-BMI-NEXT:    andnq %rsi, %rdx, %rax
-; CHECK-BMI-NEXT:    orq %rdi, %rax
+; CHECK-BMI-NEXT:    addq %rdi, %rax
 ; CHECK-BMI-NEXT:    retq
   %mx = and i64 %x, %mask
   %notmask = xor i64 %mask, -1
@@ -116,7 +117,7 @@ define i8 @in8(i8 %x, i8 %y, i8 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i8 %x, %y
@@ -139,7 +140,7 @@ define i16 @in16(i16 %x, i16 %y, i16 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i16 %x, %y
@@ -161,7 +162,7 @@ define i32 @in32(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %n0, %mask
@@ -182,7 +183,7 @@ define i64 @in64(i64 %x, i64 %y, i64 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnq %rsi, %rdx, %rax
 ; CHECK-BMI-NEXT:    andq %rdx, %rdi
-; CHECK-BMI-NEXT:    orq %rdi, %rax
+; CHECK-BMI-NEXT:    addq %rdi, %rax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i64 %x, %y
   %n1 = and i64 %n0, %mask
@@ -205,7 +206,7 @@ define i32 @in_commutativity_0_0_1(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %mask, %n0 ; swapped
@@ -225,7 +226,7 @@ define i32 @in_commutativity_0_1_0(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %n0, %mask
@@ -245,7 +246,7 @@ define i32 @in_commutativity_0_1_1(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %edi
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %mask, %n0 ; swapped
@@ -265,7 +266,7 @@ define i32 @in_commutativity_1_0_0(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %esi
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %n0, %mask
@@ -285,7 +286,7 @@ define i32 @in_commutativity_1_0_1(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %esi
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %mask, %n0 ; swapped
@@ -305,7 +306,7 @@ define i32 @in_commutativity_1_1_0(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %esi
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %n0, %mask
@@ -325,7 +326,7 @@ define i32 @in_commutativity_1_1_1(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edx, %esi
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 %x, %y
   %n1 = and i32 %mask, %n0 ; swapped
@@ -350,7 +351,7 @@ define i32 @in_complex_y0(i32 %x, i32 %y_hi, i32 %y_low, i32 %mask) {
 ; CHECK-BMI-NEXT:    andl %edx, %esi
 ; CHECK-BMI-NEXT:    andl %ecx, %edi
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %n0 = xor i32 %x, %y
@@ -373,7 +374,7 @@ define i32 @in_complex_y1(i32 %x, i32 %y_hi, i32 %y_low, i32 %mask) {
 ; CHECK-BMI-NEXT:    andl %edx, %esi
 ; CHECK-BMI-NEXT:    andl %ecx, %edi
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %n0 = xor i32 %x, %y
@@ -399,7 +400,7 @@ define i32 @in_complex_m0(i32 %x, i32 %y, i32 %m_a, i32 %m_b) {
 ; CHECK-BMI-NEXT:    xorl %ecx, %edx
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %mask = xor i32 %m_a, %m_b
   %n0 = xor i32 %x, %y
@@ -422,7 +423,7 @@ define i32 @in_complex_m1(i32 %x, i32 %y, i32 %m_a, i32 %m_b) {
 ; CHECK-BMI-NEXT:    xorl %ecx, %edx
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %mask = xor i32 %m_a, %m_b
   %n0 = xor i32 %x, %y
@@ -450,7 +451,7 @@ define i32 @in_complex_y0_m0(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-BMI-NEXT:    xorl %r8d, %ecx
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %ecx
-; CHECK-BMI-NEXT:    orl %ecx, %eax
+; CHECK-BMI-NEXT:    addl %ecx, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %mask = xor i32 %m_a, %m_b
@@ -476,7 +477,7 @@ define i32 @in_complex_y1_m0(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-BMI-NEXT:    xorl %r8d, %ecx
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %ecx
-; CHECK-BMI-NEXT:    orl %ecx, %eax
+; CHECK-BMI-NEXT:    addl %ecx, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %mask = xor i32 %m_a, %m_b
@@ -502,7 +503,7 @@ define i32 @in_complex_y0_m1(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-BMI-NEXT:    xorl %r8d, %ecx
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %ecx
-; CHECK-BMI-NEXT:    orl %ecx, %eax
+; CHECK-BMI-NEXT:    addl %ecx, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %mask = xor i32 %m_a, %m_b
@@ -528,7 +529,7 @@ define i32 @in_complex_y1_m1(i32 %x, i32 %y_hi, i32 %y_low, i32 %m_a, i32 %m_b)
 ; CHECK-BMI-NEXT:    xorl %r8d, %ecx
 ; CHECK-BMI-NEXT:    andnl %esi, %ecx, %eax
 ; CHECK-BMI-NEXT:    andl %edi, %ecx
-; CHECK-BMI-NEXT:    orl %ecx, %eax
+; CHECK-BMI-NEXT:    addl %ecx, %eax
 ; CHECK-BMI-NEXT:    retq
   %y = and i32 %y_hi, %y_low
   %mask = xor i32 %m_a, %m_b
@@ -638,7 +639,7 @@ define i32 @out_constant_varx_42(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI-NEXT:    movl %edx, %eax
 ; CHECK-BMI-NEXT:    notl %eax
 ; CHECK-BMI-NEXT:    andl $42, %eax
-; CHECK-BMI-NEXT:    orl %edi, %eax
+; CHECK-BMI-NEXT:    addl %edi, %eax
 ; CHECK-BMI-NEXT:    retq
   %notmask = xor i32 %mask, -1
   %mx = and i32 %mask, %x
@@ -680,7 +681,7 @@ define i32 @out_constant_varx_42_invmask(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl $42, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %notmask = xor i32 %mask, -1
   %mx = and i32 %notmask, %x
@@ -703,7 +704,7 @@ define i32 @in_constant_varx_42_invmask(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %edi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl $42, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %notmask = xor i32 %mask, -1
   %n0 = xor i32 %x, 42 ; %x
@@ -803,7 +804,7 @@ define i32 @out_constant_42_vary(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl $42, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %notmask = xor i32 %mask, -1
   %mx = and i32 %mask, 42
@@ -824,7 +825,7 @@ define i32 @in_constant_42_vary(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI:       # %bb.0:
 ; CHECK-BMI-NEXT:    andnl %esi, %edx, %eax
 ; CHECK-BMI-NEXT:    andl $42, %edx
-; CHECK-BMI-NEXT:    orl %edx, %eax
+; CHECK-BMI-NEXT:    addl %edx, %eax
 ; CHECK-BMI-NEXT:    retq
   %n0 = xor i32 42, %y ; %x
   %n1 = and i32 %n0, %mask
@@ -847,7 +848,7 @@ define i32 @out_constant_42_vary_invmask(i32 %x, i32 %y, i32 %mask) {
 ; CHECK-BMI-NEXT:    movl %edx, %eax
 ; CHECK-BMI-NEXT:    notl %eax
 ; CHECK-BMI-NEXT:    andl $42, %eax
-; CHECK-BMI-NEXT:    orl %esi, %eax
+; CHECK-BMI-NEXT:    addl %esi, %eax
 ; CHECK-BMI-NEXT:    retq
   %notmask = xor i32 %mask, -1
   %mx = and i32 %notmask, 42
diff --git a/llvm/test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll
index b1194bedc4e1ca..876e351d6deb59 100644
--- a/llvm/test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll
+++ b/llvm/test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll
@@ -20,7 +20,7 @@ define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind {
 ; CHECK-NEXT:    andl %edx, %edi
 ; CHECK-NEXT:    notb %al
 ; CHECK-NEXT:    andb %sil, %al
-; CHECK-NEXT:    orb %dil, %al
+; CHECK-NEXT:    addb %dil, %al
 ; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %mx = and <1 x i8> %x, %mask
@@ -44,8 +44,8 @@ define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind {
 ; CHECK-BASELINE-NEXT:    notb %r9b
 ; CHECK-BASELINE-NEXT:    andb %cl, %r9b
 ; CHECK-BASELINE-NEXT:    andb %dl, %al
-; CHECK-BASELINE-NEXT:    orb %dil, %al
-; CHECK-BASELINE-NEXT:    orb %sil, %r9b
+; CHECK-BASELINE-NEXT:    addb %dil, %al
+; CHECK-BASELINE-NEXT:    addb %sil, %r9b
 ; CHECK-BASELINE-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-BASELINE-NEXT:    movl %r9d, %edx
 ; CHECK-BASELINE-NEXT:    retq
@@ -59,8 +59,8 @@ define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind {
 ; CHECK-SSE1-NEXT:    notb %r9b
 ; CHECK-SSE1-NEXT:    andb %cl, %r9b
 ; CHECK-SSE1-NEXT:    andb %dl, %al
-; CHECK-SSE1-NEXT:    orb %dil, %al
-; CHECK-SSE1-NEXT:    orb %sil, %r9b
+; CHECK-SSE1-NEXT:    addb %dil, %al
+; CHECK-SSE1-NEXT:    addb %sil, %r9b
 ; CHECK-SSE1-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-SSE1-NEXT:    movl %r9d, %edx
 ; CHECK-SSE1-NEXT:    retq
@@ -86,11 +86,12 @@ define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind {
 define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind {
 ; CHECK-LABEL: out_v1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edx, %eax
+; CHECK-NEXT:    # kill: def $edx killed $edx def $rdx
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
 ; CHECK-NEXT:    andl %edx, %edi
-; CHECK-NEXT:    notl %eax
-; CHECK-NEXT:    andl %esi, %eax
-; CHECK-NEXT:    orl %edi, %eax
+; CHECK-NEXT:    notl %edx
+; CHECK-NEXT:    andl %esi, %edx
+; CHECK-NEXT:    leal (%rdx,%rdi), %eax
 ; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    retq
   %mx = and <1 x i16> %x, %mask
@@ -235,32 +236,38 @@ define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwi
 define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind {
 ; CHECK-BASELINE-LABEL: out_v2i16:
 ; CHECK-BASELINE:       # %bb.0:
-; CHECK-BASELINE-NEXT:    movl %r8d, %eax
+; CHECK-BASELINE-NEXT:    # kill: def $r9d killed $r9d def $r9
+; CHECK-BASELINE-NEXT:    # kill: def $r8d killed $r8d def $r8
+; CHECK-BASELINE-NEXT:    # kill: def $esi killed $esi def $rsi
+; CHECK-BASELINE-NEXT:    # kill: def $edi killed $edi def $rdi
 ; CHECK-BASELINE-NEXT:    andl %r9d, %esi
 ; CHECK-BASELINE-NEXT:    andl %r8d, %edi
-; CHECK-BASELINE-NEXT:    notl %eax
+; CHECK-BASELINE-NEXT:    notl %r8d
 ; CHECK-BASELINE-NEXT:    notl %r9d
 ; CHECK-BASELINE-NEXT:    andl %ecx, %r9d
-; CHECK-BASELINE-NEXT:    orl %esi, %r9d
-; CHECK-BASELINE-NEXT:    andl %edx, %eax
-; CHECK-BASELINE-NEXT:    orl %edi, %eax
+; CHECK-BASELINE-NEXT:    leal (%r9,%rsi), %ecx
+; CHECK-BASELINE-NEXT:    andl %edx, %r8d
+; CHECK-BASELINE-NEXT:    leal (%r8,%rdi), %eax
 ; CHECK-BASELINE-NEXT:    # kill: def $ax killed $ax killed $eax
-; CHECK-BASELINE-NEXT:    movl %r9d, %edx
+; CHECK-BASELINE-NEXT:    movl %ecx, %edx
 ; CHECK-BASELINE-NEXT:    retq
 ;
 ; CHECK-SSE1-LABEL: out_v2i16:
 ; CHECK-SSE1:       # %bb.0:
-; CHECK-SSE1-NEXT:    movl %r8d, %eax
+; CHECK-SSE1-NEXT:    # kill: def $r9d killed $r9d def $r9
+; CHECK-SSE1-NEXT:    # kill: def $r8d killed $r8d def $r8
+; CHECK-SSE1-NEXT:    # kill: def $esi killed $esi def $rsi
+; CHECK-SSE1-NEXT:    # kill: def $edi killed $edi def $rdi
 ; CHECK-SSE1-NEXT:    andl %r9d, %esi
 ; CHECK-SSE1-NEXT:    andl %r8d, %edi
-; CHECK-SSE1-NEXT:    notl %eax
+; CHECK-SSE1-NEXT:    notl %r8d
 ; CHECK-SSE1-NEXT:    notl %r9d
 ; CHECK-SSE1-NEXT:    andl %ecx, %r9d
-; CHECK-SSE1-NEXT:    orl %esi, %r9d
-; CHECK-SSE1-NEXT:    andl %edx, %eax
-; CHECK-SSE1-NEXT:    orl %edi, %eax
+; CHECK-SSE1-NEXT:    leal (%r9,%rsi), %ecx
+; CHECK-SSE1-NEXT:    andl %edx, %r8d
+; CHECK-SSE1-NEXT:    leal (%r8,%rdi), %eax
 ; CHECK-SSE1-NEXT:    # kill: def $ax killed $ax killed $eax
-; CHECK-SSE1-NEXT:    movl %r9d, %edx
+; CHECK-SSE1-NEXT:    movl %ecx, %edx
 ; CHECK-SSE1-NEXT:    retq
 ;
 ; CHECK-SSE2-LABEL: out_v2i16:
diff --git a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
index b4e91da920a2fd..3ffc3b62243af7 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-illegal-types.ll
@@ -36,7 +36,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; X86-NEXT:    shll $26, %ecx
 ; X86-NEXT:    andl $134217726, %eax # imm = 0x7FFFFFE
 ; X86-NEXT:    shrl %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    andl $134217727, %eax # imm = 0x7FFFFFF
 ; X86-NEXT:    cmpl $9586981, %eax # imm = 0x924925
 ; X86-NEXT:    setb %al
@@ -49,7 +49,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; X64-NEXT:    shll $26, %ecx
 ; X64-NEXT:    andl $134217726, %eax # imm = 0x7FFFFFE
 ; X64-NEXT:    shrl %eax
-; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    andl $134217727, %eax # imm = 0x7FFFFFF
 ; X64-NEXT:    cmpl $9586981, %eax # imm = 0x924925
 ; X64-NEXT:    setb %al
@@ -113,7 +113,7 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
 ; X86-NEXT:    shll $10, %ecx
 ; X86-NEXT:    andl $2046, %eax # imm = 0x7FE
 ; X86-NEXT:    shrl %eax
-; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    andl $2047, %eax # imm = 0x7FF
 ; X86-NEXT:    cmpl $342, %eax # imm = 0x156
 ; X86-NEXT:    setae %al
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index d3f357cd179525..78b9702f8e7944 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -25,13 +25,13 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; SSE-NEXT:    shlb $2, %al
 ; SSE-NEXT:    shrb $2, %dil
 ; SSE-NEXT:    andb $51, %dil
-; SSE-NEXT:    orb %dil, %al
+; SSE-NEXT:    addb %dil, %al
 ; SSE-NEXT:    movl %eax, %ecx
 ; SSE-NEXT:    andb $85, %cl
 ; SSE-NEXT:    addb %cl, %cl
 ; SSE-NEXT:    shrb %al
 ; SSE-NEXT:    andb $85, %al
-; SSE-NEXT:    orb %cl, %al
+; SSE-NEXT:    addb %cl, %al
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_bitreverse_i8:
@@ -42,13 +42,13 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; AVX-NEXT:    shlb $2, %al
 ; AVX-NEXT:    shrb $2, %dil
 ; AVX-NEXT:    andb $51, %dil
-; AVX-NEXT:    orb %dil, %al
+; AVX-NEXT:    addb %dil, %al
 ; AVX-NEXT:    movl %eax, %ecx
 ; AVX-NEXT:    andb $85, %cl
 ; AVX-NEXT:    addb %cl, %cl
 ; AVX-NEXT:    shrb %al
 ; AVX-NEXT:    andb $85, %al
-; AVX-NEXT:    orb %cl, %al
+; AVX-NEXT:    addb %cl, %al
 ; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: test_bitreverse_i8:
@@ -67,13 +67,13 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; GFNISSE-NEXT:    shlb $2, %al
 ; GFNISSE-NEXT:    shrb $2, %dil
 ; GFNISSE-NEXT:    andb $51, %dil
-; GFNISSE-NEXT:    orb %dil, %al
+; GFNISSE-NEXT:    addb %dil, %al
 ; GFNISSE-NEXT:    movl %eax, %ecx
 ; GFNISSE-NEXT:    andb $85, %cl
 ; GFNISSE-NEXT:    addb %cl, %cl
 ; GFNISSE-NEXT:    shrb %al
 ; GFNISSE-NEXT:    andb $85, %al
-; GFNISSE-NEXT:    orb %cl, %al
+; GFNISSE-NEXT:    addb %cl, %al
 ; GFNISSE-NEXT:    retq
 ;
 ; GFNIAVX-LABEL: test_bitreverse_i8:
@@ -84,13 +84,13 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; GFNIAVX-NEXT:    shlb $2, %al
 ; GFNIAVX-NEXT:    shrb $2, %dil
 ; GFNIAVX-NEXT:    andb $51, %dil
-; GFNIAVX-NEXT:    orb %dil, %al
+; GFNIAVX-NEXT:    addb %dil, %al
 ; GFNIAVX-NEXT:    movl %eax, %ecx
 ; GFNIAVX-NEXT:    andb $85, %cl
 ; GFNIAVX-NEXT:    addb %cl, %cl
 ; GFNIAVX-NEXT:    shrb %al
 ; GFNIAVX-NEXT:    andb $85, %al
-; GFNIAVX-NEXT:    orb %cl, %al
+; GFNIAVX-NEXT:    addb %cl, %al
 ; GFNIAVX-NEXT:    retq
   %b = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %b
@@ -106,7 +106,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; SSE-NEXT:    shll $4, %eax
 ; SSE-NEXT:    shrl $4, %edi
 ; SSE-NEXT:    andl $3855, %edi # imm = 0xF0F
-; SSE-NEXT:    orl %eax, %edi
+; SSE-NEXT:    addl %eax, %edi
 ; SSE-NEXT:    movl %edi, %eax
 ; SSE-NEXT:    andl $13107, %eax # imm = 0x3333
 ; SSE-NEXT:    shrl $2, %edi
@@ -129,7 +129,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; AVX-NEXT:    shll $4, %eax
 ; AVX-NEXT:    shrl $4, %edi
 ; AVX-NEXT:    andl $3855, %edi # imm = 0xF0F
-; AVX-NEXT:    orl %eax, %edi
+; AVX-NEXT:    addl %eax, %edi
 ; AVX-NEXT:    movl %edi, %eax
 ; AVX-NEXT:    andl $13107, %eax # imm = 0x3333
 ; AVX-NEXT:    shrl $2, %edi
@@ -160,7 +160,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; GFNISSE-NEXT:    shll $4, %eax
 ; GFNISSE-NEXT:    shrl $4, %edi
 ; GFNISSE-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNISSE-NEXT:    orl %eax, %edi
+; GFNISSE-NEXT:    addl %eax, %edi
 ; GFNISSE-NEXT:    movl %edi, %eax
 ; GFNISSE-NEXT:    andl $13107, %eax # imm = 0x3333
 ; GFNISSE-NEXT:    shrl $2, %edi
@@ -183,7 +183,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; GFNIAVX-NEXT:    shll $4, %eax
 ; GFNIAVX-NEXT:    shrl $4, %edi
 ; GFNIAVX-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNIAVX-NEXT:    orl %eax, %edi
+; GFNIAVX-NEXT:    addl %eax, %edi
 ; GFNIAVX-NEXT:    movl %edi, %eax
 ; GFNIAVX-NEXT:    andl $13107, %eax # imm = 0x3333
 ; GFNIAVX-NEXT:    shrl $2, %edi
@@ -210,7 +210,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; SSE-NEXT:    shll $4, %eax
 ; SSE-NEXT:    shrl $4, %edi
 ; SSE-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; SSE-NEXT:    orl %eax, %edi
+; SSE-NEXT:    addl %eax, %edi
 ; SSE-NEXT:    movl %edi, %eax
 ; SSE-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; SSE-NEXT:    shrl $2, %edi
@@ -232,7 +232,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; AVX-NEXT:    shll $4, %eax
 ; AVX-NEXT:    shrl $4, %edi
 ; AVX-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; AVX-NEXT:    orl %eax, %edi
+; AVX-NEXT:    addl %eax, %edi
 ; AVX-NEXT:    movl %edi, %eax
 ; AVX-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; AVX-NEXT:    shrl $2, %edi
@@ -261,7 +261,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; GFNISSE-NEXT:    shll $4, %eax
 ; GFNISSE-NEXT:    shrl $4, %edi
 ; GFNISSE-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNISSE-NEXT:    orl %eax, %edi
+; GFNISSE-NEXT:    addl %eax, %edi
 ; GFNISSE-NEXT:    movl %edi, %eax
 ; GFNISSE-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; GFNISSE-NEXT:    shrl $2, %edi
@@ -283,7 +283,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; GFNIAVX-NEXT:    shll $4, %eax
 ; GFNIAVX-NEXT:    shrl $4, %edi
 ; GFNIAVX-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNIAVX-NEXT:    orl %eax, %edi
+; GFNIAVX-NEXT:    addl %eax, %edi
 ; GFNIAVX-NEXT:    movl %edi, %eax
 ; GFNIAVX-NEXT:    andl $858993459, %eax # imm = 0x33333333
 ; GFNIAVX-NEXT:    shrl $2, %edi
@@ -309,7 +309,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; SSE-NEXT:    andq %rcx, %rax
 ; SSE-NEXT:    andq %rcx, %rdi
 ; SSE-NEXT:    shlq $4, %rdi
-; SSE-NEXT:    orq %rax, %rdi
+; SSE-NEXT:    addq %rax, %rdi
 ; SSE-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; SSE-NEXT:    movq %rdi, %rcx
 ; SSE-NEXT:    andq %rax, %rcx
@@ -333,7 +333,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; AVX-NEXT:    andq %rcx, %rax
 ; AVX-NEXT:    andq %rcx, %rdi
 ; AVX-NEXT:    shlq $4, %rdi
-; AVX-NEXT:    orq %rax, %rdi
+; AVX-NEXT:    addq %rax, %rdi
 ; AVX-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; AVX-NEXT:    movq %rdi, %rcx
 ; AVX-NEXT:    andq %rax, %rcx
@@ -364,7 +364,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; GFNISSE-NEXT:    andq %rcx, %rax
 ; GFNISSE-NEXT:    andq %rcx, %rdi
 ; GFNISSE-NEXT:    shlq $4, %rdi
-; GFNISSE-NEXT:    orq %rax, %rdi
+; GFNISSE-NEXT:    addq %rax, %rdi
 ; GFNISSE-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; GFNISSE-NEXT:    movq %rdi, %rcx
 ; GFNISSE-NEXT:    andq %rax, %rcx
@@ -388,7 +388,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; GFNIAVX-NEXT:    andq %rcx, %rax
 ; GFNIAVX-NEXT:    andq %rcx, %rdi
 ; GFNIAVX-NEXT:    shlq $4, %rdi
-; GFNIAVX-NEXT:    orq %rax, %rdi
+; GFNIAVX-NEXT:    addq %rax, %rdi
 ; GFNIAVX-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
 ; GFNIAVX-NEXT:    movq %rdi, %rcx
 ; GFNIAVX-NEXT:    andq %rax, %rcx
diff --git a/llvm/test/CodeGen/X86/vector-compare-all_of.ll b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
index ec7dca4285a355..fff16579312d77 100644
--- a/llvm/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
@@ -903,7 +903,7 @@ define i32 @test_v32i8_muti_uses(<32 x i8> %x, <32 x i8>%y, i32 %z) {
 ; SSE-NEXT:    pcmpeqb %xmm3, %xmm1
 ; SSE-NEXT:    pmovmskb %xmm1, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %eax, %ecx
+; SSE-NEXT:    addl %eax, %ecx
 ; SSE-NEXT:    cmpl $-1, %ecx
 ; SSE-NEXT:    movl $16, %eax
 ; SSE-NEXT:    cmovnel %ecx, %eax
@@ -918,7 +918,7 @@ define i32 @test_v32i8_muti_uses(<32 x i8> %x, <32 x i8>%y, i32 %z) {
 ; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
+; AVX1-NEXT:    addl %eax, %ecx
 ; AVX1-NEXT:    cmpl $-1, %ecx
 ; AVX1-NEXT:    movl $16, %eax
 ; AVX1-NEXT:    cmovnel %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/vector-compare-results.ll b/llvm/test/CodeGen/X86/vector-compare-results.ll
index f4d6b52377f574..5131837e855420 100644
--- a/llvm/test/CodeGen/X86/vector-compare-results.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-results.ll
@@ -345,7 +345,7 @@ define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
 ; SSE-NEXT:    pcmpgtb %xmm3, %xmm1
 ; SSE-NEXT:    pmovmskb %xmm1, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    movl %edx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -698,7 +698,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
 ; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    movl %edx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -764,15 +764,15 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
 ; SSE-NEXT:    pcmpgtb %xmm5, %xmm1
 ; SSE-NEXT:    pmovmskb %xmm1, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    pcmpgtb %xmm6, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %ecx
 ; SSE-NEXT:    pcmpgtb %xmm7, %xmm3
 ; SSE-NEXT:    pmovmskb %xmm3, %esi
 ; SSE-NEXT:    shll $16, %esi
-; SSE-NEXT:    orl %ecx, %esi
+; SSE-NEXT:    addl %ecx, %esi
 ; SSE-NEXT:    shlq $32, %rsi
-; SSE-NEXT:    orq %rdx, %rsi
+; SSE-NEXT:    addq %rdx, %rsi
 ; SSE-NEXT:    movq %rsi, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -786,7 +786,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %ecx, %edx
+; AVX1-NEXT:    addl %ecx, %edx
 ; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
@@ -794,9 +794,9 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %esi
 ; AVX1-NEXT:    shll $16, %esi
-; AVX1-NEXT:    orl %ecx, %esi
+; AVX1-NEXT:    addl %ecx, %esi
 ; AVX1-NEXT:    shlq $32, %rsi
-; AVX1-NEXT:    orq %rdx, %rsi
+; AVX1-NEXT:    addq %rdx, %rsi
 ; AVX1-NEXT:    movq %rsi, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -809,7 +809,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
 ; AVX2-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %edx
 ; AVX2-NEXT:    shlq $32, %rdx
-; AVX2-NEXT:    orq %rcx, %rdx
+; AVX2-NEXT:    addq %rcx, %rdx
 ; AVX2-NEXT:    movq %rdx, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1003,7 +1003,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
 ; SSE-NEXT:    packsswb %xmm9, %xmm8
 ; SSE-NEXT:    pmovmskb %xmm8, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    movl %edx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -1292,7 +1292,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
 ; SSE-NEXT:    packsswb %xmm6, %xmm4
 ; SSE-NEXT:    pmovmskb %xmm4, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    movl %edx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -1384,7 +1384,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
 ; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm5
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm4
 ; SSE-NEXT:    packsswb %xmm5, %xmm4
@@ -1394,9 +1394,9 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
 ; SSE-NEXT:    packsswb %xmm7, %xmm6
 ; SSE-NEXT:    pmovmskb %xmm6, %esi
 ; SSE-NEXT:    shll $16, %esi
-; SSE-NEXT:    orl %ecx, %esi
+; SSE-NEXT:    addl %ecx, %esi
 ; SSE-NEXT:    shlq $32, %rsi
-; SSE-NEXT:    orq %rdx, %rsi
+; SSE-NEXT:    addq %rdx, %rsi
 ; SSE-NEXT:    movq %rsi, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -1416,7 +1416,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
 ; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %ecx, %edx
+; AVX1-NEXT:    addl %ecx, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
 ; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
@@ -1430,9 +1430,9 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
 ; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %esi
 ; AVX1-NEXT:    shll $16, %esi
-; AVX1-NEXT:    orl %ecx, %esi
+; AVX1-NEXT:    addl %ecx, %esi
 ; AVX1-NEXT:    shlq $32, %rsi
-; AVX1-NEXT:    orq %rdx, %rsi
+; AVX1-NEXT:    addq %rdx, %rsi
 ; AVX1-NEXT:    movq %rsi, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -1451,7 +1451,7 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-NEXT:    vpmovmskb %ymm0, %edx
 ; AVX2-NEXT:    shlq $32, %rdx
-; AVX2-NEXT:    orq %rcx, %rdx
+; AVX2-NEXT:    addq %rcx, %rdx
 ; AVX2-NEXT:    movq %rdx, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1528,29 +1528,29 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm1
 ; SSE-NEXT:    pmovmskb %xmm1, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm2
 ; SSE-NEXT:    pmovmskb %xmm2, %esi
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm3
 ; SSE-NEXT:    pmovmskb %xmm3, %ecx
 ; SSE-NEXT:    shll $16, %ecx
-; SSE-NEXT:    orl %esi, %ecx
+; SSE-NEXT:    addl %esi, %ecx
 ; SSE-NEXT:    shlq $32, %rcx
-; SSE-NEXT:    orq %rdx, %rcx
+; SSE-NEXT:    addq %rdx, %rcx
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm4
 ; SSE-NEXT:    pmovmskb %xmm4, %edx
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm5
 ; SSE-NEXT:    pmovmskb %xmm5, %esi
 ; SSE-NEXT:    shll $16, %esi
-; SSE-NEXT:    orl %edx, %esi
+; SSE-NEXT:    addl %edx, %esi
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm6
 ; SSE-NEXT:    pmovmskb %xmm6, %edx
 ; SSE-NEXT:    pcmpgtb {{[0-9]+}}(%rsp), %xmm7
 ; SSE-NEXT:    pmovmskb %xmm7, %edi
 ; SSE-NEXT:    shll $16, %edi
-; SSE-NEXT:    orl %edx, %edi
+; SSE-NEXT:    addl %edx, %edi
 ; SSE-NEXT:    shlq $32, %rdi
-; SSE-NEXT:    orq %rsi, %rdi
+; SSE-NEXT:    addq %rsi, %rdi
 ; SSE-NEXT:    movq %rdi, 8(%rax)
 ; SSE-NEXT:    movq %rcx, (%rax)
 ; SSE-NEXT:    retq
@@ -1565,7 +1565,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    shll $16, %edx
-; AVX1-NEXT:    orl %ecx, %edx
+; AVX1-NEXT:    addl %ecx, %edx
 ; AVX1-NEXT:    vpcmpgtb %xmm5, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %esi
 ; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm0
@@ -1573,9 +1573,9 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    shll $16, %ecx
-; AVX1-NEXT:    orl %esi, %ecx
+; AVX1-NEXT:    addl %esi, %ecx
 ; AVX1-NEXT:    shlq $32, %rcx
-; AVX1-NEXT:    orq %rdx, %rcx
+; AVX1-NEXT:    addq %rdx, %rcx
 ; AVX1-NEXT:    vpcmpgtb %xmm6, %xmm2, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm0
@@ -1583,7 +1583,7 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %esi
 ; AVX1-NEXT:    shll $16, %esi
-; AVX1-NEXT:    orl %edx, %esi
+; AVX1-NEXT:    addl %edx, %esi
 ; AVX1-NEXT:    vpcmpgtb %xmm7, %xmm3, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edx
 ; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm0
@@ -1591,9 +1591,9 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %edi
 ; AVX1-NEXT:    shll $16, %edi
-; AVX1-NEXT:    orl %edx, %edi
+; AVX1-NEXT:    addl %edx, %edi
 ; AVX1-NEXT:    shlq $32, %rdi
-; AVX1-NEXT:    orq %rsi, %rdi
+; AVX1-NEXT:    addq %rsi, %rdi
 ; AVX1-NEXT:    movq %rdi, 8(%rax)
 ; AVX1-NEXT:    movq %rcx, (%rax)
 ; AVX1-NEXT:    vzeroupper
@@ -1607,13 +1607,13 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
 ; AVX2-NEXT:    vpcmpgtb %ymm5, %ymm1, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %edx
 ; AVX2-NEXT:    shlq $32, %rdx
-; AVX2-NEXT:    orq %rcx, %rdx
+; AVX2-NEXT:    addq %rcx, %rdx
 ; AVX2-NEXT:    vpcmpgtb %ymm6, %ymm2, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; AVX2-NEXT:    vpcmpgtb %ymm7, %ymm3, %ymm0
 ; AVX2-NEXT:    vpmovmskb %ymm0, %esi
 ; AVX2-NEXT:    shlq $32, %rsi
-; AVX2-NEXT:    orq %rcx, %rsi
+; AVX2-NEXT:    addq %rcx, %rsi
 ; AVX2-NEXT:    movq %rsi, 8(%rdi)
 ; AVX2-NEXT:    movq %rdx, (%rdi)
 ; AVX2-NEXT:    vzeroupper
@@ -1773,7 +1773,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
 ; SSE-NEXT:    pmovmskb %xmm0, %edx
 ; SSE-NEXT:    shll $16, %edx
-; SSE-NEXT:    orl %ecx, %edx
+; SSE-NEXT:    addl %ecx, %edx
 ; SSE-NEXT:    movl %edx, (%rdi)
 ; SSE-NEXT:    retq
 ;
@@ -2117,7 +2117,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
 ; SSE2-NEXT:    packsswb %xmm0, %xmm4
 ; SSE2-NEXT:    pmovmskb %xmm4, %edx
 ; SSE2-NEXT:    shll $16, %edx
-; SSE2-NEXT:    orl %ecx, %edx
+; SSE2-NEXT:    addl %ecx, %edx
 ; SSE2-NEXT:    movl %edx, (%rdi)
 ; SSE2-NEXT:    retq
 ;
@@ -2165,7 +2165,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
 ; SSE42-NEXT:    packsswb %xmm9, %xmm8
 ; SSE42-NEXT:    pmovmskb %xmm8, %edx
 ; SSE42-NEXT:    shll $16, %edx
-; SSE42-NEXT:    orl %ecx, %edx
+; SSE42-NEXT:    addl %ecx, %edx
 ; SSE42-NEXT:    movl %edx, (%rdi)
 ; SSE42-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-pcmp.ll b/llvm/test/CodeGen/X86/vector-pcmp.ll
index 5b43acbe523757..c8340f6c06b28e 100644
--- a/llvm/test/CodeGen/X86/vector-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vector-pcmp.ll
@@ -1845,7 +1845,7 @@ define <32 x i1> @is_positive_mask_v32i8_v32i1(<32 x i8> %x, <32 x i1> %y) {
 ; SSE2-NEXT:    shll $16, %ecx
 ; SSE2-NEXT:    psllw $7, %xmm0
 ; SSE2-NEXT:    pmovmskb %xmm0, %edx
-; SSE2-NEXT:    orl %ecx, %edx
+; SSE2-NEXT:    addl %ecx, %edx
 ; SSE2-NEXT:    movl %edx, (%rdi)
 ; SSE2-NEXT:    retq
 ;
@@ -1894,7 +1894,7 @@ define <32 x i1> @is_positive_mask_v32i8_v32i1(<32 x i8> %x, <32 x i1> %y) {
 ; SSE42-NEXT:    psllw $7, %xmm1
 ; SSE42-NEXT:    pmovmskb %xmm1, %edx
 ; SSE42-NEXT:    shll $16, %edx
-; SSE42-NEXT:    orl %ecx, %edx
+; SSE42-NEXT:    addl %ecx, %edx
 ; SSE42-NEXT:    movl %edx, (%rdi)
 ; SSE42-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 85c1e25c29ed5b..6b22f172214dc2 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -3619,7 +3619,7 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSE2-NEXT:    movq %rax, %rdx
 ; SSE2-NEXT:    shrq $51, %rdx
 ; SSE2-NEXT:    shll $15, %edx
-; SSE2-NEXT:    orl %ecx, %edx
+; SSE2-NEXT:    addl %ecx, %edx
 ; SSE2-NEXT:    sarl $15, %edx
 ; SSE2-NEXT:    movd %edx, %xmm1
 ; SSE2-NEXT:    shrq $34, %rax
@@ -3648,7 +3648,7 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSSE3-NEXT:    movq %rax, %rdx
 ; SSSE3-NEXT:    shrq $51, %rdx
 ; SSSE3-NEXT:    shll $15, %edx
-; SSSE3-NEXT:    orl %ecx, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
 ; SSSE3-NEXT:    sarl $15, %edx
 ; SSSE3-NEXT:    movd %edx, %xmm1
 ; SSSE3-NEXT:    shrq $34, %rax
@@ -3680,7 +3680,7 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSE41-NEXT:    shll $28, %ecx
 ; SSE41-NEXT:    shrq $51, %rax
 ; SSE41-NEXT:    shll $15, %eax
-; SSE41-NEXT:    orl %ecx, %eax
+; SSE41-NEXT:    addl %ecx, %eax
 ; SSE41-NEXT:    sarl $15, %eax
 ; SSE41-NEXT:    pinsrd $3, %eax, %xmm0
 ; SSE41-NEXT:    retq
@@ -3706,7 +3706,7 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; AVX-NEXT:    shll $28, %ecx
 ; AVX-NEXT:    shrq $51, %rax
 ; AVX-NEXT:    shll $15, %eax
-; AVX-NEXT:    orl %ecx, %eax
+; AVX-NEXT:    addl %ecx, %eax
 ; AVX-NEXT:    sarl $15, %eax
 ; AVX-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll
index ccaaca6c44f050..8560ec3f86b28f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -2644,7 +2644,7 @@ define <16 x i8> @PR31364(ptr nocapture readonly %a, ptr nocapture readonly %b)
 ; SSE2-NEXT:    movzbl (%rdi), %eax
 ; SSE2-NEXT:    movzbl (%rsi), %ecx
 ; SSE2-NEXT:    shll $8, %ecx
-; SSE2-NEXT:    orl %eax, %ecx
+; SSE2-NEXT:    addl %eax, %ecx
 ; SSE2-NEXT:    movd %ecx, %xmm1
 ; SSE2-NEXT:    pxor %xmm0, %xmm0
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -2661,7 +2661,7 @@ define <16 x i8> @PR31364(ptr nocapture readonly %a, ptr nocapture readonly %b)
 ; SSSE3-NEXT:    movzbl (%rdi), %eax
 ; SSSE3-NEXT:    movzbl (%rsi), %ecx
 ; SSSE3-NEXT:    shll $8, %ecx
-; SSSE3-NEXT:    orl %eax, %ecx
+; SSSE3-NEXT:    addl %eax, %ecx
 ; SSSE3-NEXT:    movd %ecx, %xmm0
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1,1,1,1,1,1,1],zero,xmm0[1,1,1,1,1,0,0,0]
 ; SSSE3-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
index c977929b21f452..10afd8108fb885 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -856,10 +856,10 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512F-NEXT:    kmovw %k0, %eax
 ; AVX512F-NEXT:    kmovw %k0, %ecx
 ; AVX512F-NEXT:    shll $16, %ecx
-; AVX512F-NEXT:    orl %eax, %ecx
+; AVX512F-NEXT:    addl %eax, %ecx
 ; AVX512F-NEXT:    movq %rcx, %rax
 ; AVX512F-NEXT:    shlq $32, %rax
-; AVX512F-NEXT:    orq %rcx, %rax
+; AVX512F-NEXT:    addq %rcx, %rax
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -872,10 +872,10 @@ define i64 @shuf64i1_zero(i64 %a) {
 ; AVX512VL-NEXT:    kmovw %k0, %eax
 ; AVX512VL-NEXT:    kmovw %k0, %ecx
 ; AVX512VL-NEXT:    shll $16, %ecx
-; AVX512VL-NEXT:    orl %eax, %ecx
+; AVX512VL-NEXT:    addl %eax, %ecx
 ; AVX512VL-NEXT:    movq %rcx, %rax
 ; AVX512VL-NEXT:    shlq $32, %rax
-; AVX512VL-NEXT:    orq %rcx, %rax
+; AVX512VL-NEXT:    addq %rcx, %rax
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-trunc.ll b/llvm/test/CodeGen/X86/vector-trunc.ll
index 1a5f5fd5e6db5d..2c8831be633a3f 100644
--- a/llvm/test/CodeGen/X86/vector-trunc.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc.ll
@@ -2000,7 +2000,7 @@ define i16 @PR66194(i8 %q) {
 ; SSE2-SSSE3-NEXT:    sete %cl
 ; SSE2-SSSE3-NEXT:    movl %ecx, %edx
 ; SSE2-SSSE3-NEXT:    shll $16, %edx
-; SSE2-SSSE3-NEXT:    orl %eax, %edx
+; SSE2-SSSE3-NEXT:    addl %eax, %edx
 ; SSE2-SSSE3-NEXT:    movd %edx, %xmm0
 ; SSE2-SSSE3-NEXT:    pinsrw $2, %eax, %xmm0
 ; SSE2-SSSE3-NEXT:    pinsrw $3, %eax, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index 74926f46ffa437..ba407d41469872 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -2335,7 +2335,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; SSE2-NEXT:    shll $13, %ecx
 ; SSE2-NEXT:    movq %rax, %rdx
 ; SSE2-NEXT:    shrq $51, %rdx
-; SSE2-NEXT:    orl %ecx, %edx
+; SSE2-NEXT:    addl %ecx, %edx
 ; SSE2-NEXT:    movd %edx, %xmm1
 ; SSE2-NEXT:    shrq $34, %rax
 ; SSE2-NEXT:    movd %eax, %xmm2
@@ -2356,7 +2356,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; SSSE3-NEXT:    shll $13, %ecx
 ; SSSE3-NEXT:    movq %rax, %rdx
 ; SSSE3-NEXT:    shrq $51, %rdx
-; SSSE3-NEXT:    orl %ecx, %edx
+; SSSE3-NEXT:    addl %ecx, %edx
 ; SSSE3-NEXT:    movd %edx, %xmm1
 ; SSSE3-NEXT:    shrq $34, %rax
 ; SSSE3-NEXT:    movd %eax, %xmm2
@@ -2372,7 +2372,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; SSE41-NEXT:    movq (%rdi), %rcx
 ; SSE41-NEXT:    movq %rcx, %rdx
 ; SSE41-NEXT:    shrq $51, %rdx
-; SSE41-NEXT:    orl %eax, %edx
+; SSE41-NEXT:    addl %eax, %edx
 ; SSE41-NEXT:    movq %rcx, %rax
 ; SSE41-NEXT:    shrq $17, %rax
 ; SSE41-NEXT:    movd %ecx, %xmm0
@@ -2390,7 +2390,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; AVX1-NEXT:    movq (%rdi), %rcx
 ; AVX1-NEXT:    movq %rcx, %rdx
 ; AVX1-NEXT:    shrq $51, %rdx
-; AVX1-NEXT:    orl %eax, %edx
+; AVX1-NEXT:    addl %eax, %edx
 ; AVX1-NEXT:    movq %rcx, %rax
 ; AVX1-NEXT:    shrq $17, %rax
 ; AVX1-NEXT:    vmovd %ecx, %xmm0
@@ -2408,7 +2408,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; AVX2-NEXT:    movq (%rdi), %rcx
 ; AVX2-NEXT:    movq %rcx, %rdx
 ; AVX2-NEXT:    shrq $51, %rdx
-; AVX2-NEXT:    orl %eax, %edx
+; AVX2-NEXT:    addl %eax, %edx
 ; AVX2-NEXT:    movq %rcx, %rax
 ; AVX2-NEXT:    shrq $17, %rax
 ; AVX2-NEXT:    vmovd %ecx, %xmm0
@@ -2427,7 +2427,7 @@ define <4 x i32> @zext_4i17_to_4i32(ptr %ptr) {
 ; AVX512-NEXT:    movq (%rdi), %rcx
 ; AVX512-NEXT:    movq %rcx, %rdx
 ; AVX512-NEXT:    shrq $51, %rdx
-; AVX512-NEXT:    orl %eax, %edx
+; AVX512-NEXT:    addl %eax, %edx
 ; AVX512-NEXT:    movq %rcx, %rax
 ; AVX512-NEXT:    shrq $17, %rax
 ; AVX512-NEXT:    vmovd %ecx, %xmm0
diff --git a/llvm/test/CodeGen/X86/xor-lea.ll b/llvm/test/CodeGen/X86/xor-lea.ll
index 10e9525a2706a3..689c0dfd92f62f 100644
--- a/llvm/test/CodeGen/X86/xor-lea.ll
+++ b/llvm/test/CodeGen/X86/xor-lea.ll
@@ -69,7 +69,7 @@ define i32 @xor_sminval_i32(i32 %x) {
 ; X86-LABEL: xor_sminval_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
-; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: xor_sminval_i32:
@@ -103,7 +103,7 @@ define i64 @xor_sminval_i64(i64 %x) {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl $-2147483648, %edx # imm = 0x80000000
-; X86-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: xor_sminval_i64:
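
For context (this note and sketch are not part of the patch): an `or` whose two operands have no set bits in common is add-like, so it can be selected as `add` and folded into LEA and other addressing forms; likewise, `xor` against the sign-bit constant (as in the xor-lea.ll hunks above) is add-like, since adding the top bit can only carry out of the register. A minimal IR sketch of the disjoint-`or` shape these tests exercise; the function and value names are illustrative, not taken from the patch:

define i32 @disjoint_or(i16 %hi16, i16 %lo16) {
  %hi = zext i16 %hi16 to i32
  %lo = zext i16 %lo16 to i32
  %shl = shl i32 %hi, 16      ; low 16 bits of %shl are known zero
  %or = or i32 %shl, %lo      ; operands share no bits: equivalent to add
  ret i32 %or
}

With the patch, an `or` like this can be emitted as `addl`, which is what produces the `shll $16` + `addl` sequences in the updated checks.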


