[llvm] [FastISel][X86] Use getTypeForExtReturn in GetReturnInfo. (PR #80803)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 5 23:01:03 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/80803

The comment and code here seem to match getTypeForExtReturn. History shows that, at the time this code was added, similar code existed in SelectionDAGBuilder; that SelectionDAGBuilder code has since been refactored into getTypeForExtReturn.

This patch makes FastISel match SelectionDAGBuilder.

The test changes are because X86 customizes getTypeForExtReturn, so we now only extend returns to i8.
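
For reference, the default getTypeForExtReturn hook performs essentially the same promotion as the FIXME block this patch removes from GetReturnInfo, and X86 overrides the hook so that small integer returns are only widened to i8. The sketch below is paraphrased and simplified (the real X86 override also special-cases Darwin, which keeps the old i8/i16 promotion); treat it as illustrative rather than the exact upstream code:

// Default hook: promote sub-i32 integer returns to the i32 register type,
// mirroring the code this patch removes from GetReturnInfo.
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                ISD::NodeType /*ExtendKind*/) const {
  EVT MinVT = getRegisterType(Context, MVT::i32);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}

// X86 override (simplified): the ABI does not require i8/i16 returns to be
// widened, so only i1 is extended (to i8); i8/i16 and wider are returned
// unchanged.
EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                           ISD::NodeType ExtendKind) const {
  MVT ReturnMVT = MVT::i32;
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    ReturnMVT = MVT::i8;
  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}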

Stumbled onto this difference by accident.

From 2d127c7ad5e623f924ae02a7073ebd90d52f2649 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 5 Feb 2024 22:48:17 -0800
Subject: [PATCH] [FastISel][X86] Use getTypeForExtReturn in GetReturnInfo.

The comment and code here seem to match getTypeForExtReturn. History
shows that, at the time this code was added, similar code existed in
SelectionDAGBuilder; that SelectionDAGBuilder code has since been
refactored into getTypeForExtReturn.

This patch makes FastISel match SelectionDAGBuilder.

The test changes are because X86 customizes getTypeForExtReturn, so we
now only extend returns to i8.

Stumbled onto this difference by accident.
---
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  11 +-
 llvm/lib/Target/X86/X86FastISel.cpp           |  13 +-
 .../X86/avx512-intrinsics-fast-isel.ll        |  24 +--
 .../X86/avx512bwvl-intrinsics-fast-isel.ll    |  36 ++--
 .../X86/avx512vl-intrinsics-fast-isel.ll      |  48 +++---
 llvm/test/CodeGen/X86/fast-isel-fcmp.ll       | 155 ++++--------------
 llvm/test/CodeGen/X86/fast-isel-ret-ext.ll    |   2 +-
 .../X86/keylocker-intrinsics-fast-isel.ll     |  16 --
 llvm/test/CodeGen/X86/xaluo.ll                |  67 ++------
 llvm/test/CodeGen/X86/xmulo.ll                |  63 +++----
 llvm/test/DebugInfo/X86/convert-debugloc.ll   |   2 +-
 11 files changed, 141 insertions(+), 296 deletions(-)

diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index fe7bed760572b..16cd14ba3de9b 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1738,15 +1738,8 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
     else if (attr.hasRetAttr(Attribute::ZExt))
       ExtendKind = ISD::ZERO_EXTEND;
 
-    // FIXME: C calling convention requires the return type to be promoted to
-    // at least 32-bit. But this is not necessary for non-C calling
-    // conventions. The frontend should mark functions whose return values
-    // require promoting with signext or zeroext attributes.
-    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
-      MVT MinVT = TLI.getRegisterType(MVT::i32);
-      if (VT.bitsLT(MinVT))
-        VT = MinVT;
-    }
+    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
+      VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);
 
     unsigned NumParts =
         TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 1ce1e6f6a5635..746fa432ade7a 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1250,8 +1250,6 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
       if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
         return false;
 
-      assert(DstVT == MVT::i32 && "X86 should always ext to i32");
-
       if (SrcVT == MVT::i1) {
         if (Outs[0].Flags.isSExt())
           return false;
@@ -1259,10 +1257,13 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
         SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
         SrcVT = MVT::i8;
       }
-      unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
-                                             ISD::SIGN_EXTEND;
-      // TODO
-      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
+      if (SrcVT != DstVT) {
+        unsigned Op =
+            Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+        // TODO
+        SrcReg =
+            fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
+      }
     }
 
     // Make the copy.
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 780abc9f9dc43..1ca870add95b5 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -21,7 +21,7 @@ define zeroext i16 @test_mm512_kunpackb(<8 x i64> %__A, <8 x i64> %__B, <8 x i64
 ; X86-NEXT:    kunpckbw %k0, %k1, %k1
 ; X86-NEXT:    vpcmpneqd 72(%ebp), %zmm3, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    movl %ebp, %esp
 ; X86-NEXT:    popl %ebp
 ; X86-NEXT:    .cfi_def_cfa %esp, 4
@@ -35,7 +35,7 @@ define zeroext i16 @test_mm512_kunpackb(<8 x i64> %__A, <8 x i64> %__B, <8 x i64
 ; X64-NEXT:    kunpckbw %k0, %k1, %k1
 ; X64-NEXT:    vpcmpneqd %zmm5, %zmm4, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -367,7 +367,7 @@ define zeroext i16 @test_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmd %zmm0, %zmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -385,7 +385,7 @@ define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64>
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -394,7 +394,7 @@ define zeroext i16 @test_mm512_mask_testn_epi32_mask(i16 zeroext %__U, <8 x i64>
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmd %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -412,7 +412,7 @@ define zeroext i8 @test_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmq %zmm0, %zmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -429,7 +429,7 @@ define zeroext i8 @test_mm512_mask_testn_epi64_mask(i8 zeroext %__U, <8 x i64> %
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmq %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -438,7 +438,7 @@ define zeroext i8 @test_mm512_mask_testn_epi64_mask(i8 zeroext %__U, <8 x i64> %
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmq %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -457,7 +457,7 @@ define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64>
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -466,7 +466,7 @@ define zeroext i16 @test_mm512_mask_test_epi32_mask(i16 zeroext %__U, <8 x i64>
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmd %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -486,7 +486,7 @@ define zeroext i8 @test_mm512_mask_test_epi64_mask(i8 zeroext %__U, <8 x i64> %_
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmq %zmm0, %zmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -495,7 +495,7 @@ define zeroext i8 @test_mm512_mask_test_epi64_mask(i8 zeroext %__U, <8 x i64> %_
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmq %zmm0, %zmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
index a32b84986e895..00729262473da 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics-fast-isel.ll
@@ -9,7 +9,7 @@ define zeroext i16 @test_mm_test_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmb %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -25,7 +25,7 @@ define zeroext i16 @test_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X86-NEXT:    vptestmb %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_test_epi8_mask:
@@ -33,7 +33,7 @@ define zeroext i16 @test_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestmb %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -91,7 +91,7 @@ define zeroext i8 @test_mm_test_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmw %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -108,7 +108,7 @@ define zeroext i8 @test_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X86-NEXT:    kmovd %eax, %k1
 ; X86-NEXT:    vptestmw %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_test_epi16_mask:
@@ -116,7 +116,7 @@ define zeroext i8 @test_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestmw %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -133,7 +133,7 @@ define zeroext i16 @test_mm256_test_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmw %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -150,7 +150,7 @@ define zeroext i16 @test_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64>
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X86-NEXT:    vptestmw %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -159,7 +159,7 @@ define zeroext i16 @test_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64>
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestmw %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -177,7 +177,7 @@ define zeroext i16 @test_mm_testn_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmb %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -193,7 +193,7 @@ define zeroext i16 @test_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X86-NEXT:    vptestnmb %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_testn_epi8_mask:
@@ -201,7 +201,7 @@ define zeroext i16 @test_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestnmb %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -259,7 +259,7 @@ define zeroext i8 @test_mm_testn_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmw %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -276,7 +276,7 @@ define zeroext i8 @test_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X86-NEXT:    kmovd %eax, %k1
 ; X86-NEXT:    vptestnmw %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_testn_epi16_mask:
@@ -284,7 +284,7 @@ define zeroext i8 @test_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestnmw %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -301,7 +301,7 @@ define zeroext i16 @test_mm256_testn_epi16_mask(<4 x i64> %__A, <4 x i64> %__B)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmw %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
-; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -318,7 +318,7 @@ define zeroext i16 @test_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64>
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
 ; X86-NEXT:    vptestnmw %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovd %k0, %eax
-; X86-NEXT:    movzwl %ax, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -327,7 +327,7 @@ define zeroext i16 @test_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64>
 ; X64-NEXT:    kmovd %edi, %k1
 ; X64-NEXT:    vptestnmw %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovd %k0, %eax
-; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index 173e2bad8aceb..06e7096e430bb 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -1547,7 +1547,7 @@ define zeroext i8 @test_mm_test_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmd %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1565,7 +1565,7 @@ define zeroext i8 @test_mm_mask_test_epi32_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmd %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_test_epi32_mask:
@@ -1573,7 +1573,7 @@ define zeroext i8 @test_mm_mask_test_epi32_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmd %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1592,7 +1592,7 @@ define zeroext i8 @test_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmd %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -1610,7 +1610,7 @@ define zeroext i8 @test_mm256_mask_test_epi32_mask(i8 zeroext %__U, <4 x i64> %_
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmd %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -1619,7 +1619,7 @@ define zeroext i8 @test_mm256_mask_test_epi32_mask(i8 zeroext %__U, <4 x i64> %_
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmd %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -1637,7 +1637,7 @@ define zeroext i8 @test_mm_test_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmq %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1654,7 +1654,7 @@ define zeroext i8 @test_mm_mask_test_epi64_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmq %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_test_epi64_mask:
@@ -1662,7 +1662,7 @@ define zeroext i8 @test_mm_mask_test_epi64_mask(i8 zeroext %__U, <2 x i64> %__A,
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmq %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1680,7 +1680,7 @@ define zeroext i8 @test_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestmq %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -1698,7 +1698,7 @@ define zeroext i8 @test_mm256_mask_test_epi64_mask(i8 zeroext %__U, <4 x i64> %_
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestmq %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -1707,7 +1707,7 @@ define zeroext i8 @test_mm256_mask_test_epi64_mask(i8 zeroext %__U, <4 x i64> %_
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestmq %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -1726,7 +1726,7 @@ define zeroext i8 @test_mm_testn_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmd %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1744,7 +1744,7 @@ define zeroext i8 @test_mm_mask_testn_epi32_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmd %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_testn_epi32_mask:
@@ -1752,7 +1752,7 @@ define zeroext i8 @test_mm_mask_testn_epi32_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmd %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1771,7 +1771,7 @@ define zeroext i8 @test_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmd %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -1789,7 +1789,7 @@ define zeroext i8 @test_mm256_mask_testn_epi32_mask(i8 zeroext %__U, <4 x i64> %
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmd %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -1798,7 +1798,7 @@ define zeroext i8 @test_mm256_mask_testn_epi32_mask(i8 zeroext %__U, <4 x i64> %
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmd %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
@@ -1816,7 +1816,7 @@ define zeroext i8 @test_mm_testn_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmq %xmm0, %xmm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1833,7 +1833,7 @@ define zeroext i8 @test_mm_mask_testn_epi64_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmq %xmm0, %xmm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_testn_epi64_mask:
@@ -1841,7 +1841,7 @@ define zeroext i8 @test_mm_mask_testn_epi64_mask(i8 zeroext %__U, <2 x i64> %__A
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmq %xmm0, %xmm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 entry:
   %and.i.i = and <2 x i64> %__B, %__A
@@ -1859,7 +1859,7 @@ define zeroext i8 @test_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vptestnmq %ymm0, %ymm1, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
@@ -1877,7 +1877,7 @@ define zeroext i8 @test_mm256_mask_testn_epi64_mask(i8 zeroext %__U, <4 x i64> %
 ; X86-NEXT:    kmovw %eax, %k1
 ; X86-NEXT:    vptestnmq %ymm0, %ymm1, %k0 {%k1}
 ; X86-NEXT:    kmovw %k0, %eax
-; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
 ; X86-NEXT:    vzeroupper
 ; X86-NEXT:    retl
 ;
@@ -1886,7 +1886,7 @@ define zeroext i8 @test_mm256_mask_testn_epi64_mask(i8 zeroext %__U, <4 x i64> %
 ; X64-NEXT:    kmovw %edi, %k1
 ; X64-NEXT:    vptestnmq %ymm0, %ymm1, %k0 {%k1}
 ; X64-NEXT:    kmovw %k0, %eax
-; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/fast-isel-fcmp.ll b/llvm/test/CodeGen/X86/fast-isel-fcmp.ll
index c6ad2171aa895..b9ef3154cd1c3 100644
--- a/llvm/test/CodeGen/X86/fast-isel-fcmp.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-fcmp.ll
@@ -16,21 +16,19 @@ define zeroext i1 @fcmp_oeq(float %x, float %y) {
 ; FAST_NOAVX-LABEL: fcmp_oeq:
 ; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
-; FAST_NOAVX-NEXT:    sete %al
-; FAST_NOAVX-NEXT:    setnp %cl
-; FAST_NOAVX-NEXT:    andb %al, %cl
-; FAST_NOAVX-NEXT:    andb $1, %cl
-; FAST_NOAVX-NEXT:    movzbl %cl, %eax
+; FAST_NOAVX-NEXT:    sete %cl
+; FAST_NOAVX-NEXT:    setnp %al
+; FAST_NOAVX-NEXT:    andb %cl, %al
+; FAST_NOAVX-NEXT:    andb $1, %al
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq:
 ; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
-; FAST_AVX-NEXT:    sete %al
-; FAST_AVX-NEXT:    setnp %cl
-; FAST_AVX-NEXT:    andb %al, %cl
-; FAST_AVX-NEXT:    andb $1, %cl
-; FAST_AVX-NEXT:    movzbl %cl, %eax
+; FAST_AVX-NEXT:    sete %cl
+; FAST_AVX-NEXT:    setnp %al
+; FAST_AVX-NEXT:    andb %cl, %al
+; FAST_AVX-NEXT:    andb $1, %al
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oeq float %x, %y
   ret i1 %1
@@ -48,7 +46,6 @@ define zeroext i1 @fcmp_ogt(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ogt:
@@ -56,7 +53,6 @@ define zeroext i1 @fcmp_ogt(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ogt float %x, %y
   ret i1 %1
@@ -74,7 +70,6 @@ define zeroext i1 @fcmp_oge(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge:
@@ -82,7 +77,6 @@ define zeroext i1 @fcmp_oge(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oge float %x, %y
   ret i1 %1
@@ -100,7 +94,6 @@ define zeroext i1 @fcmp_olt(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_olt:
@@ -108,7 +101,6 @@ define zeroext i1 @fcmp_olt(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp olt float %x, %y
   ret i1 %1
@@ -126,7 +118,6 @@ define zeroext i1 @fcmp_ole(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole:
@@ -134,7 +125,6 @@ define zeroext i1 @fcmp_ole(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ole float %x, %y
   ret i1 %1
@@ -152,7 +142,6 @@ define zeroext i1 @fcmp_one(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_one:
@@ -160,7 +149,6 @@ define zeroext i1 @fcmp_one(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp one float %x, %y
   ret i1 %1
@@ -178,7 +166,6 @@ define zeroext i1 @fcmp_ord(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord:
@@ -186,7 +173,6 @@ define zeroext i1 @fcmp_ord(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ord float %x, %y
   ret i1 %1
@@ -204,7 +190,6 @@ define zeroext i1 @fcmp_uno(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno:
@@ -212,7 +197,6 @@ define zeroext i1 @fcmp_uno(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp uno float %x, %y
   ret i1 %1
@@ -230,7 +214,6 @@ define zeroext i1 @fcmp_ueq(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ueq:
@@ -238,7 +221,6 @@ define zeroext i1 @fcmp_ueq(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ueq float %x, %y
   ret i1 %1
@@ -256,7 +238,6 @@ define zeroext i1 @fcmp_ugt(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt:
@@ -264,7 +245,6 @@ define zeroext i1 @fcmp_ugt(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ugt float %x, %y
   ret i1 %1
@@ -282,7 +262,6 @@ define zeroext i1 @fcmp_uge(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uge:
@@ -290,7 +269,6 @@ define zeroext i1 @fcmp_uge(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp uge float %x, %y
   ret i1 %1
@@ -308,7 +286,6 @@ define zeroext i1 @fcmp_ult(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult:
@@ -316,7 +293,6 @@ define zeroext i1 @fcmp_ult(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ult float %x, %y
   ret i1 %1
@@ -334,7 +310,6 @@ define zeroext i1 @fcmp_ule(float %x, float %y) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ule:
@@ -342,7 +317,6 @@ define zeroext i1 @fcmp_ule(float %x, float %y) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ule float %x, %y
   ret i1 %1
@@ -360,21 +334,19 @@ define zeroext i1 @fcmp_une(float %x, float %y) {
 ; FAST_NOAVX-LABEL: fcmp_une:
 ; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
-; FAST_NOAVX-NEXT:    setne %al
-; FAST_NOAVX-NEXT:    setp %cl
-; FAST_NOAVX-NEXT:    orb %al, %cl
-; FAST_NOAVX-NEXT:    andb $1, %cl
-; FAST_NOAVX-NEXT:    movzbl %cl, %eax
+; FAST_NOAVX-NEXT:    setne %cl
+; FAST_NOAVX-NEXT:    setp %al
+; FAST_NOAVX-NEXT:    orb %cl, %al
+; FAST_NOAVX-NEXT:    andb $1, %al
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une:
 ; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
-; FAST_AVX-NEXT:    setne %al
-; FAST_AVX-NEXT:    setp %cl
-; FAST_AVX-NEXT:    orb %al, %cl
-; FAST_AVX-NEXT:    andb $1, %cl
-; FAST_AVX-NEXT:    movzbl %cl, %eax
+; FAST_AVX-NEXT:    setne %cl
+; FAST_AVX-NEXT:    setp %al
+; FAST_AVX-NEXT:    orb %cl, %al
+; FAST_AVX-NEXT:    andb $1, %al
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp une float %x, %y
   ret i1 %1
@@ -392,7 +364,6 @@ define zeroext i1 @icmp_eq(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    sete %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp eq i32 %x, %y
   ret i1 %1
@@ -410,7 +381,6 @@ define zeroext i1 @icmp_ne(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setne %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp ne i32 %x, %y
   ret i1 %1
@@ -428,7 +398,6 @@ define zeroext i1 @icmp_ugt(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    seta %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp ugt i32 %x, %y
   ret i1 %1
@@ -446,7 +415,6 @@ define zeroext i1 @icmp_uge(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setae %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp uge i32 %x, %y
   ret i1 %1
@@ -464,7 +432,6 @@ define zeroext i1 @icmp_ult(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp ult i32 %x, %y
   ret i1 %1
@@ -482,7 +449,6 @@ define zeroext i1 @icmp_ule(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setbe %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp ule i32 %x, %y
   ret i1 %1
@@ -500,7 +466,6 @@ define zeroext i1 @icmp_sgt(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setg %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp sgt i32 %x, %y
   ret i1 %1
@@ -518,7 +483,6 @@ define zeroext i1 @icmp_sge(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setge %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp sge i32 %x, %y
   ret i1 %1
@@ -536,7 +500,6 @@ define zeroext i1 @icmp_slt(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setl %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp slt i32 %x, %y
   ret i1 %1
@@ -554,7 +517,6 @@ define zeroext i1 @icmp_sle(i32 %x, i32 %y) {
 ; FAST-NEXT:    cmpl %esi, %edi
 ; FAST-NEXT:    setle %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = icmp sle i32 %x, %y
   ret i1 %1
@@ -573,7 +535,6 @@ define zeroext i1 @fcmp_oeq2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq2:
@@ -581,7 +542,6 @@ define zeroext i1 @fcmp_oeq2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oeq float %x, %x
   ret i1 %1
@@ -601,22 +561,20 @@ define zeroext i1 @fcmp_oeq3(float %x) {
 ; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
-; FAST_NOAVX-NEXT:    sete %al
-; FAST_NOAVX-NEXT:    setnp %cl
-; FAST_NOAVX-NEXT:    andb %al, %cl
-; FAST_NOAVX-NEXT:    andb $1, %cl
-; FAST_NOAVX-NEXT:    movzbl %cl, %eax
+; FAST_NOAVX-NEXT:    sete %cl
+; FAST_NOAVX-NEXT:    setnp %al
+; FAST_NOAVX-NEXT:    andb %cl, %al
+; FAST_NOAVX-NEXT:    andb $1, %al
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oeq3:
 ; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
-; FAST_AVX-NEXT:    sete %al
-; FAST_AVX-NEXT:    setnp %cl
-; FAST_AVX-NEXT:    andb %al, %cl
-; FAST_AVX-NEXT:    andb $1, %cl
-; FAST_AVX-NEXT:    movzbl %cl, %eax
+; FAST_AVX-NEXT:    sete %cl
+; FAST_AVX-NEXT:    setnp %al
+; FAST_AVX-NEXT:    andb %cl, %al
+; FAST_AVX-NEXT:    andb $1, %al
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oeq float %x, 0.000000e+00
   ret i1 %1
@@ -632,7 +590,7 @@ define zeroext i1 @fcmp_ogt2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %1 = fcmp ogt float %x, %x
   ret i1 %1
@@ -652,7 +610,6 @@ define zeroext i1 @fcmp_ogt3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ogt3:
@@ -661,7 +618,6 @@ define zeroext i1 @fcmp_ogt3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ogt float %x, 0.000000e+00
   ret i1 %1
@@ -679,7 +635,6 @@ define zeroext i1 @fcmp_oge2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge2:
@@ -687,7 +642,6 @@ define zeroext i1 @fcmp_oge2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oge float %x, %x
   ret i1 %1
@@ -707,7 +661,6 @@ define zeroext i1 @fcmp_oge3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_oge3:
@@ -716,7 +669,6 @@ define zeroext i1 @fcmp_oge3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp oge float %x, 0.000000e+00
   ret i1 %1
@@ -732,7 +684,7 @@ define zeroext i1 @fcmp_olt2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %1 = fcmp olt float %x, %x
   ret i1 %1
@@ -752,7 +704,6 @@ define zeroext i1 @fcmp_olt3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    seta %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_olt3:
@@ -761,7 +712,6 @@ define zeroext i1 @fcmp_olt3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    seta %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp olt float %x, 0.000000e+00
   ret i1 %1
@@ -779,7 +729,6 @@ define zeroext i1 @fcmp_ole2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole2:
@@ -787,7 +736,6 @@ define zeroext i1 @fcmp_ole2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ole float %x, %x
   ret i1 %1
@@ -807,7 +755,6 @@ define zeroext i1 @fcmp_ole3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setae %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ole3:
@@ -816,7 +763,6 @@ define zeroext i1 @fcmp_ole3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setae %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ole float %x, 0.000000e+00
   ret i1 %1
@@ -832,7 +778,7 @@ define zeroext i1 @fcmp_one2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %1 = fcmp one float %x, %x
   ret i1 %1
@@ -852,7 +798,6 @@ define zeroext i1 @fcmp_one3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setne %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_one3:
@@ -861,7 +806,6 @@ define zeroext i1 @fcmp_one3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setne %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp one float %x, 0.000000e+00
   ret i1 %1
@@ -879,7 +823,6 @@ define zeroext i1 @fcmp_ord2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord2:
@@ -887,7 +830,6 @@ define zeroext i1 @fcmp_ord2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ord float %x, %x
   ret i1 %1
@@ -905,7 +847,6 @@ define zeroext i1 @fcmp_ord3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setnp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ord3:
@@ -913,7 +854,6 @@ define zeroext i1 @fcmp_ord3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setnp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ord float %x, 0.000000e+00
   ret i1 %1
@@ -931,7 +871,6 @@ define zeroext i1 @fcmp_uno2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno2:
@@ -939,7 +878,6 @@ define zeroext i1 @fcmp_uno2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp uno float %x, %x
   ret i1 %1
@@ -957,7 +895,6 @@ define zeroext i1 @fcmp_uno3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uno3:
@@ -965,7 +902,6 @@ define zeroext i1 @fcmp_uno3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp uno float %x, 0.000000e+00
   ret i1 %1
@@ -981,7 +917,6 @@ define zeroext i1 @fcmp_ueq2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = fcmp ueq float %x, %x
   ret i1 %1
@@ -1001,7 +936,6 @@ define zeroext i1 @fcmp_ueq3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    sete %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ueq3:
@@ -1010,7 +944,6 @@ define zeroext i1 @fcmp_ueq3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    sete %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ueq float %x, 0.000000e+00
   ret i1 %1
@@ -1028,7 +961,6 @@ define zeroext i1 @fcmp_ugt2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt2:
@@ -1036,7 +968,6 @@ define zeroext i1 @fcmp_ugt2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ugt float %x, %x
   ret i1 %1
@@ -1056,7 +987,6 @@ define zeroext i1 @fcmp_ugt3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ugt3:
@@ -1065,7 +995,6 @@ define zeroext i1 @fcmp_ugt3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ugt float %x, 0.000000e+00
   ret i1 %1
@@ -1081,7 +1010,6 @@ define zeroext i1 @fcmp_uge2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = fcmp uge float %x, %x
   ret i1 %1
@@ -1101,7 +1029,6 @@ define zeroext i1 @fcmp_uge3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm1
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_uge3:
@@ -1110,7 +1037,6 @@ define zeroext i1 @fcmp_uge3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm1
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp uge float %x, 0.000000e+00
   ret i1 %1
@@ -1128,7 +1054,6 @@ define zeroext i1 @fcmp_ult2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult2:
@@ -1136,7 +1061,6 @@ define zeroext i1 @fcmp_ult2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ult float %x, %x
   ret i1 %1
@@ -1156,7 +1080,6 @@ define zeroext i1 @fcmp_ult3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setb %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ult3:
@@ -1165,7 +1088,6 @@ define zeroext i1 @fcmp_ult3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setb %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ult float %x, 0.000000e+00
   ret i1 %1
@@ -1181,7 +1103,6 @@ define zeroext i1 @fcmp_ule2(float %x) {
 ; FAST:       ## %bb.0:
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %1 = fcmp ule float %x, %x
   ret i1 %1
@@ -1201,7 +1122,6 @@ define zeroext i1 @fcmp_ule3(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
 ; FAST_NOAVX-NEXT:    setbe %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_ule3:
@@ -1210,7 +1130,6 @@ define zeroext i1 @fcmp_ule3(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
 ; FAST_AVX-NEXT:    setbe %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp ule float %x, 0.000000e+00
   ret i1 %1
@@ -1228,7 +1147,6 @@ define zeroext i1 @fcmp_une2(float %x) {
 ; FAST_NOAVX-NEXT:    ucomiss %xmm0, %xmm0
 ; FAST_NOAVX-NEXT:    setp %al
 ; FAST_NOAVX-NEXT:    andb $1, %al
-; FAST_NOAVX-NEXT:    movzbl %al, %eax
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une2:
@@ -1236,7 +1154,6 @@ define zeroext i1 @fcmp_une2(float %x) {
 ; FAST_AVX-NEXT:    vucomiss %xmm0, %xmm0
 ; FAST_AVX-NEXT:    setp %al
 ; FAST_AVX-NEXT:    andb $1, %al
-; FAST_AVX-NEXT:    movzbl %al, %eax
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp une float %x, %x
   ret i1 %1
@@ -1256,22 +1173,20 @@ define zeroext i1 @fcmp_une3(float %x) {
 ; FAST_NOAVX:       ## %bb.0:
 ; FAST_NOAVX-NEXT:    xorps %xmm1, %xmm1
 ; FAST_NOAVX-NEXT:    ucomiss %xmm1, %xmm0
-; FAST_NOAVX-NEXT:    setne %al
-; FAST_NOAVX-NEXT:    setp %cl
-; FAST_NOAVX-NEXT:    orb %al, %cl
-; FAST_NOAVX-NEXT:    andb $1, %cl
-; FAST_NOAVX-NEXT:    movzbl %cl, %eax
+; FAST_NOAVX-NEXT:    setne %cl
+; FAST_NOAVX-NEXT:    setp %al
+; FAST_NOAVX-NEXT:    orb %cl, %al
+; FAST_NOAVX-NEXT:    andb $1, %al
 ; FAST_NOAVX-NEXT:    retq
 ;
 ; FAST_AVX-LABEL: fcmp_une3:
 ; FAST_AVX:       ## %bb.0:
 ; FAST_AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; FAST_AVX-NEXT:    vucomiss %xmm1, %xmm0
-; FAST_AVX-NEXT:    setne %al
-; FAST_AVX-NEXT:    setp %cl
-; FAST_AVX-NEXT:    orb %al, %cl
-; FAST_AVX-NEXT:    andb $1, %cl
-; FAST_AVX-NEXT:    movzbl %cl, %eax
+; FAST_AVX-NEXT:    setne %cl
+; FAST_AVX-NEXT:    setp %al
+; FAST_AVX-NEXT:    orb %cl, %al
+; FAST_AVX-NEXT:    andb $1, %al
 ; FAST_AVX-NEXT:    retq
   %1 = fcmp une float %x, 0.000000e+00
   ret i1 %1
diff --git a/llvm/test/CodeGen/X86/fast-isel-ret-ext.ll b/llvm/test/CodeGen/X86/fast-isel-ret-ext.ll
index cd3439fcddc5f..0341694fe826b 100644
--- a/llvm/test/CodeGen/X86/fast-isel-ret-ext.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-ret-ext.ll
@@ -34,5 +34,5 @@ define zeroext i1 @test5(i32 %y) nounwind {
   ret i1 %conv
   ; CHECK-LABEL: test5:
   ; CHECK: andb $1
-  ; CHECK: movzbl {{.*}}, %eax
+  ; CHECK-NEXT: ret
 }
diff --git a/llvm/test/CodeGen/X86/keylocker-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/keylocker-intrinsics-fast-isel.ll
index ae046be9a5083..f91c4ad0e8e34 100644
--- a/llvm/test/CodeGen/X86/keylocker-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/keylocker-intrinsics-fast-isel.ll
@@ -119,7 +119,6 @@ entry:
 define zeroext i8 @test_mm_aesenc256kl_u8(ptr %odata, <2 x i64> %idata, ptr %h) {
 ; CHECK-LABEL: test_mm_aesenc256kl_u8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesenc256kl (%rsi), %xmm0
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -127,7 +126,6 @@ define zeroext i8 @test_mm_aesenc256kl_u8(ptr %odata, <2 x i64> %idata, ptr %h)
 ;
 ; EGPR-LABEL: test_mm_aesenc256kl_u8:
 ; EGPR:       # %bb.0: # %entry
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesenc256kl (%rsi), %xmm0 # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xde,0x06]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -143,7 +141,6 @@ entry:
 define zeroext i8 @test_mm_aesdec256kl_u8(ptr %odata, <2 x i64> %idata, ptr %h) {
 ; CHECK-LABEL: test_mm_aesdec256kl_u8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesdec256kl (%rsi), %xmm0
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -151,7 +148,6 @@ define zeroext i8 @test_mm_aesdec256kl_u8(ptr %odata, <2 x i64> %idata, ptr %h)
 ;
 ; EGPR-LABEL: test_mm_aesdec256kl_u8:
 ; EGPR:       # %bb.0: # %entry
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesdec256kl (%rsi), %xmm0 # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xdf,0x06]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -167,7 +163,6 @@ entry:
 define zeroext i8 @test_mm_aesenc128kl_u8(ptr %odata, <2 x i64> %idata, ptr %h) {
 ; CHECK-LABEL: test_mm_aesenc128kl_u8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesenc128kl (%rsi), %xmm0
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -175,7 +170,6 @@ define zeroext i8 @test_mm_aesenc128kl_u8(ptr %odata, <2 x i64> %idata, ptr %h)
 ;
 ; EGPR-LABEL: test_mm_aesenc128kl_u8:
 ; EGPR:       # %bb.0: # %entry
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesenc128kl (%rsi), %xmm0 # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xdc,0x06]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -191,7 +185,6 @@ entry:
 define zeroext i8 @test_mm_aesdec128kl_u8(ptr %odata, <2 x i64> %idata, ptr %h) {
 ; CHECK-LABEL: test_mm_aesdec128kl_u8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesdec128kl (%rsi), %xmm0
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -199,7 +192,6 @@ define zeroext i8 @test_mm_aesdec128kl_u8(ptr %odata, <2 x i64> %idata, ptr %h)
 ;
 ; EGPR-LABEL: test_mm_aesdec128kl_u8:
 ; EGPR:       # %bb.0: # %entry
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesdec128kl (%rsi), %xmm0 # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xdd,0x06]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -223,7 +215,6 @@ define zeroext i8 @test__mm_aesencwide128kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; CHECK-NEXT:    movaps 80(%rsi), %xmm5
 ; CHECK-NEXT:    movaps 96(%rsi), %xmm6
 ; CHECK-NEXT:    movaps 112(%rsi), %xmm7
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesencwide128kl (%rdx)
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -246,7 +237,6 @@ define zeroext i8 @test__mm_aesencwide128kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; EGPR-NEXT:    movaps 80(%rsi), %xmm5 # encoding: [0x0f,0x28,0x6e,0x50]
 ; EGPR-NEXT:    movaps 96(%rsi), %xmm6 # encoding: [0x0f,0x28,0x76,0x60]
 ; EGPR-NEXT:    movaps 112(%rsi), %xmm7 # encoding: [0x0f,0x28,0x7e,0x70]
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesencwide128kl (%rdx) # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xd8,0x02]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -313,7 +303,6 @@ define zeroext i8 @test__mm_aesdecwide128kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; CHECK-NEXT:    movaps 80(%rsi), %xmm5
 ; CHECK-NEXT:    movaps 96(%rsi), %xmm6
 ; CHECK-NEXT:    movaps 112(%rsi), %xmm7
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesdecwide128kl (%rdx)
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -336,7 +325,6 @@ define zeroext i8 @test__mm_aesdecwide128kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; EGPR-NEXT:    movaps 80(%rsi), %xmm5 # encoding: [0x0f,0x28,0x6e,0x50]
 ; EGPR-NEXT:    movaps 96(%rsi), %xmm6 # encoding: [0x0f,0x28,0x76,0x60]
 ; EGPR-NEXT:    movaps 112(%rsi), %xmm7 # encoding: [0x0f,0x28,0x7e,0x70]
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesdecwide128kl (%rdx) # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xd8,0x0a]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -403,7 +391,6 @@ define zeroext i8 @test__mm_aesencwide256kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; CHECK-NEXT:    movaps 80(%rsi), %xmm5
 ; CHECK-NEXT:    movaps 96(%rsi), %xmm6
 ; CHECK-NEXT:    movaps 112(%rsi), %xmm7
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesencwide256kl (%rdx)
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -426,7 +413,6 @@ define zeroext i8 @test__mm_aesencwide256kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; EGPR-NEXT:    movaps 80(%rsi), %xmm5 # encoding: [0x0f,0x28,0x6e,0x50]
 ; EGPR-NEXT:    movaps 96(%rsi), %xmm6 # encoding: [0x0f,0x28,0x76,0x60]
 ; EGPR-NEXT:    movaps 112(%rsi), %xmm7 # encoding: [0x0f,0x28,0x7e,0x70]
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesencwide256kl (%rdx) # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xd8,0x12]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
@@ -493,7 +479,6 @@ define zeroext i8 @test__mm_aesdecwide256kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; CHECK-NEXT:    movaps 80(%rsi), %xmm5
 ; CHECK-NEXT:    movaps 96(%rsi), %xmm6
 ; CHECK-NEXT:    movaps 112(%rsi), %xmm7
-; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    aesdecwide256kl (%rdx)
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    movaps %xmm0, (%rdi)
@@ -516,7 +501,6 @@ define zeroext i8 @test__mm_aesdecwide256kl_u8(ptr %odata, ptr %idata, ptr %h) {
 ; EGPR-NEXT:    movaps 80(%rsi), %xmm5 # encoding: [0x0f,0x28,0x6e,0x50]
 ; EGPR-NEXT:    movaps 96(%rsi), %xmm6 # encoding: [0x0f,0x28,0x76,0x60]
 ; EGPR-NEXT:    movaps 112(%rsi), %xmm7 # encoding: [0x0f,0x28,0x7e,0x70]
-; EGPR-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; EGPR-NEXT:    aesdecwide256kl (%rdx) # EVEX TO LEGACY Compression encoding: [0xf3,0x0f,0x38,0xd8,0x1a]
 ; EGPR-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; EGPR-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
diff --git a/llvm/test/CodeGen/X86/xaluo.ll b/llvm/test/CodeGen/X86/xaluo.ll
index 5796e485f6314..c2a8002c949ce 100644
--- a/llvm/test/CodeGen/X86/xaluo.ll
+++ b/llvm/test/CodeGen/X86/xaluo.ll
@@ -21,7 +21,6 @@ define zeroext i1 @saddoi8(i8 signext %v1, i8 signext %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movb %dil, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
   %val = extractvalue {i8, i1} %t, 0
@@ -44,7 +43,6 @@ define zeroext i1 @saddoi16(i16 %v1, i16 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movw %di, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
   %val = extractvalue {i16, i1} %t, 0
@@ -67,7 +65,6 @@ define zeroext i1 @saddoi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -90,7 +87,6 @@ define zeroext i1 @saddoi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -114,7 +110,6 @@ define zeroext i1 @saddoinci8(i8 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movb %dil, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 1)
   %val = extractvalue {i8, i1} %t, 0
@@ -137,7 +132,6 @@ define zeroext i1 @saddoinci16(i16 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movw %di, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 1)
   %val = extractvalue {i16, i1} %t, 0
@@ -160,7 +154,6 @@ define zeroext i1 @saddoinci32(i32 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %edi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
   %val = extractvalue {i32, i1} %t, 0
@@ -183,7 +176,6 @@ define zeroext i1 @saddoinci64(i64 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
   %val = extractvalue {i64, i1} %t, 0
@@ -207,7 +199,6 @@ define zeroext i1 @saddoi64imm1(i64 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %v1)
   %val = extractvalue {i64, i1} %t, 0
@@ -231,7 +222,6 @@ define zeroext i1 @saddoi64imm2(i64 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -2147483648)
   %val = extractvalue {i64, i1} %t, 0
@@ -251,12 +241,11 @@ define zeroext i1 @saddoi64imm3(i64 %v1, ptr %res) {
 ;
 ; FAST-LABEL: saddoi64imm3:
 ; FAST:       ## %bb.0:
-; FAST-NEXT:    movabsq $-21474836489, %rax ## imm = 0xFFFFFFFAFFFFFFF7
-; FAST-NEXT:    addq %rdi, %rax
-; FAST-NEXT:    seto %cl
-; FAST-NEXT:    movq %rax, (%rsi)
-; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movabsq $-21474836489, %rcx ## imm = 0xFFFFFFFAFFFFFFF7
+; FAST-NEXT:    addq %rdi, %rcx
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movq %rcx, (%rsi)
+; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -21474836489)
   %val = extractvalue {i64, i1} %t, 0
@@ -279,7 +268,6 @@ define zeroext i1 @saddoi64imm4(i64 %v1, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483647)
   %val = extractvalue {i64, i1} %t, 0
@@ -298,12 +286,11 @@ define zeroext i1 @saddoi64imm5(i64 %v1, ptr %res) {
 ;
 ; FAST-LABEL: saddoi64imm5:
 ; FAST:       ## %bb.0:
-; FAST-NEXT:    movl $2147483648, %eax ## imm = 0x80000000
-; FAST-NEXT:    addq %rdi, %rax
-; FAST-NEXT:    seto %cl
-; FAST-NEXT:    movq %rax, (%rsi)
-; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl $2147483648, %ecx ## imm = 0x80000000
+; FAST-NEXT:    addq %rdi, %rcx
+; FAST-NEXT:    seto %al
+; FAST-NEXT:    movq %rcx, (%rsi)
+; FAST-NEXT:    andb $1, %al
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 2147483648)
   %val = extractvalue {i64, i1} %t, 0
@@ -327,7 +314,6 @@ define zeroext i1 @uaddoi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -350,7 +336,6 @@ define zeroext i1 @uaddoi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -374,7 +359,6 @@ define zeroext i1 @uaddoinci8(i8 %v1, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movb %dil, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v1, i8 1)
   %val = extractvalue {i8, i1} %t, 0
@@ -397,7 +381,6 @@ define zeroext i1 @uaddoinci16(i16 %v1, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movw %di, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i16, i1} @llvm.uadd.with.overflow.i16(i16 %v1, i16 1)
   %val = extractvalue {i16, i1} %t, 0
@@ -420,7 +403,6 @@ define zeroext i1 @uaddoinci32(i32 %v1, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movl %edi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
   %val = extractvalue {i32, i1} %t, 0
@@ -443,7 +425,6 @@ define zeroext i1 @uaddoinci64(i64 %v1, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movq %rdi, (%rsi)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1)
   %val = extractvalue {i64, i1} %t, 0
@@ -467,7 +448,6 @@ define zeroext i1 @ssuboi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -490,7 +470,6 @@ define zeroext i1 @ssuboi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -514,7 +493,6 @@ define zeroext i1 @usuboi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -537,7 +515,6 @@ define zeroext i1 @usuboi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    setb %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -679,12 +656,11 @@ define zeroext i1 @saddobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB31_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -717,12 +693,11 @@ define zeroext i1 @saddobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB32_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -755,12 +730,11 @@ define zeroext i1 @uaddobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB33_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -793,12 +767,11 @@ define zeroext i1 @uaddobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB34_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -831,12 +804,11 @@ define zeroext i1 @ssubobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB35_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -869,12 +841,11 @@ define zeroext i1 @ssubobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB36_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
@@ -907,12 +878,11 @@ define zeroext i1 @usubobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB37_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
@@ -945,12 +915,11 @@ define zeroext i1 @usubobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  ## %bb.2: ## %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  LBB38_1: ## %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    ## kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
diff --git a/llvm/test/CodeGen/X86/xmulo.ll b/llvm/test/CodeGen/X86/xmulo.ll
index 6eb34b4e773e8..2169b39b9dfa0 100644
--- a/llvm/test/CodeGen/X86/xmulo.ll
+++ b/llvm/test/CodeGen/X86/xmulo.ll
@@ -76,7 +76,7 @@ define zeroext i1 @smuloi8(i8 %v1, i8 %v2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi8:
@@ -118,7 +118,6 @@ define zeroext i1 @smuloi16(i16 %v1, i16 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movw %di, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi16:
@@ -157,7 +156,6 @@ define zeroext i1 @smuloi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi32:
@@ -196,7 +194,6 @@ define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi64:
@@ -306,7 +303,7 @@ define zeroext i1 @umuloi8(i8 %v1, i8 %v2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi8:
@@ -355,7 +352,7 @@ define zeroext i1 @umuloi16(i16 %v1, i16 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movw %ax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi16:
@@ -404,7 +401,7 @@ define zeroext i1 @umuloi32(i32 %v1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movl %eax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi32:
@@ -453,7 +450,7 @@ define zeroext i1 @umuloi64(i64 %v1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movq %rax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi64:
@@ -779,12 +776,11 @@ define zeroext i1 @smulobri8(i8 %v1, i8 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB15_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smulobri8:
@@ -843,12 +839,11 @@ define zeroext i1 @smulobri16(i16 %v1, i16 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB16_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smulobri16:
@@ -904,12 +899,11 @@ define zeroext i1 @smulobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB17_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smulobri32:
@@ -965,12 +959,11 @@ define zeroext i1 @smulobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB18_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smulobri64:
@@ -1094,12 +1087,11 @@ define zeroext i1 @umulobri8(i8 %v1, i8 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB19_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umulobri8:
@@ -1162,12 +1154,11 @@ define zeroext i1 @umulobri16(i16 %v1, i16 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB20_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umulobri16:
@@ -1226,12 +1217,11 @@ define zeroext i1 @umulobri32(i32 %v1, i32 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB21_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umulobri32:
@@ -1290,12 +1280,11 @@ define zeroext i1 @umulobri64(i64 %v1, i64 %v2) {
 ; FAST-NEXT:  # %bb.2: # %continue
 ; FAST-NEXT:    movb $1, %al
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ; FAST-NEXT:  .LBB22_1: # %overflow
 ; FAST-NEXT:    xorl %eax, %eax
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
+; FAST-NEXT:    # kill: def $al killed $al killed $eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umulobri64:
@@ -1422,7 +1411,7 @@ define zeroext i1 @smuloi8_load(ptr %ptr1, i8 %v2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi8_load:
@@ -1471,7 +1460,7 @@ define zeroext i1 @smuloi8_load2(i8 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi8_load2:
@@ -1515,7 +1504,6 @@ define zeroext i1 @smuloi16_load(ptr %ptr1, i16 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movw %si, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi16_load:
@@ -1556,7 +1544,6 @@ define zeroext i1 @smuloi16_load2(i16 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movw %di, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi16_load2:
@@ -1597,7 +1584,6 @@ define zeroext i1 @smuloi32_load(ptr %ptr1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %esi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi32_load:
@@ -1638,7 +1624,6 @@ define zeroext i1 @smuloi32_load2(i32 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movl %edi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi32_load2:
@@ -1679,7 +1664,6 @@ define zeroext i1 @smuloi64_load(ptr %ptr1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rsi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi64_load:
@@ -1789,7 +1773,6 @@ define zeroext i1 @smuloi64_load2(i64 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %al
 ; FAST-NEXT:    movq %rdi, (%rdx)
 ; FAST-NEXT:    andb $1, %al
-; FAST-NEXT:    movzbl %al, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: smuloi64_load2:
@@ -1898,7 +1881,7 @@ define zeroext i1 @umuloi8_load(ptr %ptr1, i8 %v2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi8_load:
@@ -1947,7 +1930,7 @@ define zeroext i1 @umuloi8_load2(i8 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %cl
 ; FAST-NEXT:    movb %al, (%rdx)
 ; FAST-NEXT:    andb $1, %cl
-; FAST-NEXT:    movzbl %cl, %eax
+; FAST-NEXT:    movl %ecx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi8_load2:
@@ -1997,7 +1980,7 @@ define zeroext i1 @umuloi16_load(ptr %ptr1, i16 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movw %ax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi16_load:
@@ -2050,7 +2033,7 @@ define zeroext i1 @umuloi16_load2(i16 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movw %ax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi16_load2:
@@ -2101,7 +2084,7 @@ define zeroext i1 @umuloi32_load(ptr %ptr1, i32 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movl %eax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi32_load:
@@ -2152,7 +2135,7 @@ define zeroext i1 @umuloi32_load2(i32 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movl %eax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi32_load2:
@@ -2203,7 +2186,7 @@ define zeroext i1 @umuloi64_load(ptr %ptr1, i64 %v2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movq %rax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi64_load:
@@ -2280,7 +2263,7 @@ define zeroext i1 @umuloi64_load2(i64 %v1, ptr %ptr2, ptr %res) {
 ; FAST-NEXT:    seto %dl
 ; FAST-NEXT:    movq %rax, (%rcx)
 ; FAST-NEXT:    andb $1, %dl
-; FAST-NEXT:    movzbl %dl, %eax
+; FAST-NEXT:    movl %edx, %eax
 ; FAST-NEXT:    retq
 ;
 ; WIN64-LABEL: umuloi64_load2:
diff --git a/llvm/test/DebugInfo/X86/convert-debugloc.ll b/llvm/test/DebugInfo/X86/convert-debugloc.ll
index de0857d538327..ad3f48c05de99 100644
--- a/llvm/test/DebugInfo/X86/convert-debugloc.ll
+++ b/llvm/test/DebugInfo/X86/convert-debugloc.ll
@@ -27,7 +27,7 @@
 ; RUN:   | FileCheck %s --check-prefix=VERBOSE --check-prefix=CONV "--implicit-check-not={{DW_TAG|NULL}}"
 
 
-; SPLITCONV: Compile Unit:{{.*}} DWO_id = 0x24191746f389535f
+; SPLITCONV: Compile Unit:{{.*}} DWO_id = 0x06580df9fdd5b54b
 ; SPLIT: DW_TAG_skeleton_unit
 
 ; CONV: DW_TAG_compile_unit