[llvm] [X86] Handle BSF/BSR "zero-input pass through" behaviour (PR #123623)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 20 09:41:05 PST 2025


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/123623

From fe9b29b14ab64b394636acbcc5ff5b1ce6db09e3 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 21 Aug 2024 16:10:40 +0100
Subject: [PATCH] [X86] Handle BSF/BSR "zero-input pass through" behaviour

Intel docs have been updated to be similar to AMD and now describe BSF/BSR as not changing the destination register if the input value was zero, which allows us to support the CTTZ/CTLZ zero-input cases by pre-loading the destination with the NumBits result (BSR is a bit messy as the result has to be XOR'd with NumBits-1 to create a CTLZ result). VIA/Zhaoxin x86_64 CPUs have also been confirmed to match this behaviour.

There are still some limits to this - it's only supported for x86_64-capable processors (and I've only enabled it for x86_64 codegen), and there are some Intel CPUs that don't correctly zero the upper 32 bits of a pass through register when used for BSR32/BSF32 with a zero source value (i.e. the whole 64 bits may get passed through).
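
For reference, a minimal standalone sketch (not part of the patch) of the semantics this relies on - bsf64/bsr64 below are hypothetical C++ reference models of the instructions, assuming the AMD-documented zero-input behaviour described above:

#include <cstdint>

// Reference model of BSF: if the source is zero the destination is left
// untouched (the "pass through"), otherwise it receives the index of the
// lowest set bit.
static uint64_t bsf64(uint64_t dst, uint64_t src) {
  return src ? (uint64_t)__builtin_ctzll(src) : dst;
}

// Reference model of BSR: as above, but the index of the highest set bit.
static uint64_t bsr64(uint64_t dst, uint64_t src) {
  return src ? (uint64_t)(63 - __builtin_clzll(src)) : dst;
}

// CTTZ: pre-load the destination with NumBits (64) so a zero source falls
// through to the correct result without needing a CMOV.
uint64_t cttz64(uint64_t x) { return bsf64(64, x); }

// CTLZ: BSR is messier - pre-load with 2*NumBits-1 (127) so that after the
// XOR with NumBits-1 (63) a zero source still yields NumBits (127^63 == 64).
uint64_t ctlz64(uint64_t x) { return bsr64(127, x) ^ 63; }

This corresponds to the new codegen in the test diffs below, e.g. the movl $64, %eax + rep bsfq %rdi, %rax pair replacing the old bsfq + cmovneq sequence.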
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  58 +++++---
 llvm/lib/Target/X86/X86InstrCompiler.td       |   8 -
 llvm/lib/Target/X86/X86InstrFragments.td      |   4 +-
 llvm/lib/Target/X86/X86InstrMisc.td           |  50 +++----
 llvm/lib/Target/X86/X86Subtarget.h            |   5 +
 llvm/test/CodeGen/X86/bit_ceil.ll             |  12 +-
 llvm/test/CodeGen/X86/combine-or.ll           |   6 +-
 llvm/test/CodeGen/X86/ctlo.ll                 |  14 +-
 llvm/test/CodeGen/X86/ctlz.ll                 |  31 ++--
 llvm/test/CodeGen/X86/cttz.ll                 |  22 +--
 llvm/test/CodeGen/X86/dagcombine-select.ll    |  46 +++---
 llvm/test/CodeGen/X86/pr40090.ll              |   1 +
 llvm/test/CodeGen/X86/pr92569.ll              |   9 +-
 .../CodeGen/X86/scheduler-backtracking.ll     | 140 ++++++++----------
 llvm/test/TableGen/x86-fold-tables.inc        |  12 +-
 .../X86/BtVer2/clear-super-register-1.s       |   6 +-
 16 files changed, 210 insertions(+), 214 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 33ddcb57e9b08b..1a1782160f8fea 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -434,11 +434,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 
   if (!Subtarget.hasBMI()) {
     setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Custom);
     if (Subtarget.is64Bit()) {
       setOperationPromotedToType(ISD::CTTZ , MVT::i32, MVT::i64);
       setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
-      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
+      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
     }
   }
 
@@ -3386,15 +3386,18 @@ bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
 }
 
 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
-  // Speculate cttz only if we can directly use TZCNT or can promote to i32/i64.
+  // Speculate cttz only if we can directly use TZCNT/CMOV, can promote to
+  // i32/i64 or can rely on BSF passthrough value.
   return Subtarget.hasBMI() || Subtarget.canUseCMOV() ||
-         (!Ty->isVectorTy() &&
-          Ty->getScalarSizeInBits() < (Subtarget.is64Bit() ? 64u : 32u));
+         Subtarget.hasBitScanPassThrough() ||
+         (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32u);
 }
 
 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
-  // Speculate ctlz only if we can directly use LZCNT.
-  return Subtarget.hasLZCNT() || Subtarget.canUseCMOV();
+  // Speculate ctlz only if we can directly use LZCNT/CMOV, or can rely on BSR
+  // passthrough value.
+  return Subtarget.hasLZCNT() || Subtarget.canUseCMOV() ||
+         Subtarget.hasBitScanPassThrough();
 }
 
 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
@@ -28694,11 +28697,18 @@ static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
   }
 
+  // Check if we can safely pass a result through BSR for zero sources.
+  SDValue PassThru = DAG.getUNDEF(OpVT);
+  if (Opc == ISD::CTLZ && Subtarget.hasBitScanPassThrough() &&
+      !DAG.isKnownNeverZero(Op))
+    PassThru = DAG.getConstant(NumBits + NumBits - 1, dl, OpVT);
+
   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
-  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
+  Op = DAG.getNode(X86ISD::BSR, dl, VTs, PassThru, Op);
 
-  if (Opc == ISD::CTLZ) {
+  // Skip CMOV if we're using a pass through value.
+  if (Opc == ISD::CTLZ && PassThru.isUndef()) {
     // If src is zero (i.e. bsr sets ZF), returns NumBits.
     SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
                      DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
@@ -28721,16 +28731,22 @@ static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
   unsigned NumBits = VT.getScalarSizeInBits();
   SDValue N0 = Op.getOperand(0);
   SDLoc dl(Op);
+  unsigned Opc = Op.getOpcode();
+  bool NonZeroSrc = DAG.isKnownNeverZero(N0);
+
+  assert(!VT.isVector() && "Only scalar CTTZ requires custom lowering");
 
-  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
-         "Only scalar CTTZ requires custom lowering");
+  // Check if we can safely pass a result through BSF for zero sources.
+  SDValue PassThru = DAG.getUNDEF(VT);
+  if (Opc == ISD::CTTZ && !NonZeroSrc && Subtarget.hasBitScanPassThrough())
+    PassThru = DAG.getConstant(NumBits, dl, VT);
 
   // Issue a bsf (scan bits forward) which also sets EFLAGS.
   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
-  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
+  Op = DAG.getNode(X86ISD::BSF, dl, VTs, PassThru, N0);
 
-  // If src is known never zero we can skip the CMOV.
-  if (DAG.isKnownNeverZero(N0))
+  // Skip CMOV if src is never zero or we're using a pass through value.
+  if (Opc != ISD::CTTZ || NonZeroSrc || !PassThru.isUndef())
     return Op;
 
   // If src is zero (i.e. bsf sets ZF), returns NumBits.
@@ -38194,12 +38210,18 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     Known = KnownBits::mul(Known, Known2);
     break;
   }
-  case X86ISD::BSR:
-    // BSR(0) is undef, but any use of BSR already accounts for non-zero inputs.
-    // Similar KnownBits behaviour to CTLZ_ZERO_UNDEF.
+  case X86ISD::BSR: {
     // TODO: Bound with input known bits?
     Known.Zero.setBitsFrom(Log2_32(BitWidth));
+
+    if (!Op.getOperand(0).isUndef() &&
+        !DAG.isKnownNeverZero(Op.getOperand(1), Depth + 1)) {
+      KnownBits Known2;
+      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+      Known = Known.intersectWith(Known2);
+    }
     break;
+  }
   case X86ISD::SETCC:
     Known.Zero.setBitsFrom(1);
     break;
@@ -54244,7 +54266,7 @@ static SDValue combineXorSubCTLZ(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
   }
 
   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
-  Op = DAG.getNode(X86ISD::BSR, DL, VTs, Op);
+  Op = DAG.getNode(X86ISD::BSR, DL, VTs, DAG.getUNDEF(OpVT), Op);
   if (VT == MVT::i8)
     Op = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Op);
 
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 7d4c5c0e10e492..fa69595094de8a 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -2212,14 +2212,6 @@ def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
 def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
           (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
 
-// Bit scan instruction patterns to match explicit zero-undef behavior.
-def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
-def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
-def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
-def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
-def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
-def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
-
 // When HasMOVBE is enabled it is possible to get a non-legalized
 // register-register 16 bit bswap. This maps it to a ROL instruction.
 let Predicates = [HasMOVBE] in {
diff --git a/llvm/lib/Target/X86/X86InstrFragments.td b/llvm/lib/Target/X86/X86InstrFragments.td
index ea7af893ce103f..a3029301c6699a 100644
--- a/llvm/lib/Target/X86/X86InstrFragments.td
+++ b/llvm/lib/Target/X86/X86InstrFragments.td
@@ -134,8 +134,8 @@ def SDTX86Cmpccxadd : SDTypeProfile<1, 4, [SDTCisSameAs<0, 2>,
 def X86MFence : SDNode<"X86ISD::MFENCE", SDTNone, [SDNPHasChain]>;
 
 
-def X86bsf     : SDNode<"X86ISD::BSF",      SDTUnaryArithWithFlags>;
-def X86bsr     : SDNode<"X86ISD::BSR",      SDTUnaryArithWithFlags>;
+def X86bsf     : SDNode<"X86ISD::BSF",      SDTBinaryArithWithFlags>;
+def X86bsr     : SDNode<"X86ISD::BSR",      SDTBinaryArithWithFlags>;
 def X86fshl    : SDNode<"X86ISD::FSHL",     SDTIntShiftDOp>;
 def X86fshr    : SDNode<"X86ISD::FSHR",     SDTIntShiftDOp>;
 
diff --git a/llvm/lib/Target/X86/X86InstrMisc.td b/llvm/lib/Target/X86/X86InstrMisc.td
index 43c02c4f85844c..290d91bb2ce699 100644
--- a/llvm/lib/Target/X86/X86InstrMisc.td
+++ b/llvm/lib/Target/X86/X86InstrMisc.td
@@ -247,55 +247,55 @@ def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
 } // Constraints = "$src = $dst", SchedRW
 
 // Bit scan instructions.
-let Defs = [EFLAGS] in {
-def BSF16rr  : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+let Defs = [EFLAGS], Constraints = "$fallback = $dst" in {
+def BSF16rr  : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$fallback, GR16:$src),
                  "bsf{w}\t{$src, $dst|$dst, $src}",
-                 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
+                 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$fallback, GR16:$src))]>,
                   TB, OpSize16, Sched<[WriteBSF]>;
-def BSF16rm  : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+def BSF16rm  : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins GR16:$fallback, i16mem:$src),
                  "bsf{w}\t{$src, $dst|$dst, $src}",
-                 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
+                 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$fallback, (loadi16 addr:$src)))]>,
                  TB, OpSize16, Sched<[WriteBSFLd]>;
-def BSF32rr  : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+def BSF32rr  : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$fallback, GR32:$src),
                  "bsf{l}\t{$src, $dst|$dst, $src}",
-                 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
+                 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$fallback, GR32:$src))]>,
                  TB, OpSize32, Sched<[WriteBSF]>;
-def BSF32rm  : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+def BSF32rm  : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins GR32:$fallback, i32mem:$src),
                  "bsf{l}\t{$src, $dst|$dst, $src}",
-                 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
+                 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$fallback, (loadi32 addr:$src)))]>,
                  TB, OpSize32, Sched<[WriteBSFLd]>;
-def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+def BSF64rr  : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$fallback, GR64:$src),
                   "bsf{q}\t{$src, $dst|$dst, $src}",
-                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
+                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$fallback, GR64:$src))]>,
                   TB, Sched<[WriteBSF]>;
-def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+def BSF64rm  : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins GR64:$fallback, i64mem:$src),
                   "bsf{q}\t{$src, $dst|$dst, $src}",
-                  [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
+                  [(set GR64:$dst, EFLAGS, (X86bsf GR64:$fallback, (loadi64 addr:$src)))]>,
                   TB, Sched<[WriteBSFLd]>;
 
-def BSR16rr  : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+def BSR16rr  : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$fallback, GR16:$src),
                  "bsr{w}\t{$src, $dst|$dst, $src}",
-                 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
+                 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$fallback, GR16:$src))]>,
                  TB, OpSize16, Sched<[WriteBSR]>;
-def BSR16rm  : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+def BSR16rm  : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins GR16:$fallback, i16mem:$src),
                  "bsr{w}\t{$src, $dst|$dst, $src}",
-                 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
+                 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$fallback, (loadi16 addr:$src)))]>,
                  TB, OpSize16, Sched<[WriteBSRLd]>;
-def BSR32rr  : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+def BSR32rr  : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$fallback, GR32:$src),
                  "bsr{l}\t{$src, $dst|$dst, $src}",
-                 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
+                 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$fallback, GR32:$src))]>,
                  TB, OpSize32, Sched<[WriteBSR]>;
-def BSR32rm  : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+def BSR32rm  : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins GR32:$fallback, i32mem:$src),
                  "bsr{l}\t{$src, $dst|$dst, $src}",
-                 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
+                 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$fallback, (loadi32 addr:$src)))]>,
                  TB, OpSize32, Sched<[WriteBSRLd]>;
-def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
+def BSR64rr  : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$fallback, GR64:$src),
                   "bsr{q}\t{$src, $dst|$dst, $src}",
-                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
+                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$fallback, GR64:$src))]>,
                   TB, Sched<[WriteBSR]>;
-def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
+def BSR64rm  : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins GR64:$fallback, i64mem:$src),
                   "bsr{q}\t{$src, $dst|$dst, $src}",
-                  [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
+                  [(set GR64:$dst, EFLAGS, (X86bsr GR64:$fallback, (loadi64 addr:$src)))]>,
                   TB, Sched<[WriteBSRLd]>;
 } // Defs = [EFLAGS]
 
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index e3cb9ee8ce1909..c399989f115d75 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -263,6 +263,11 @@ class X86Subtarget final : public X86GenSubtargetInfo {
     return hasBWI() && useAVX512Regs();
   }
 
+  // Returns true if the destination register of a BSF/BSR instruction is
+  // not touched if the source register is zero.
+  // NOTE: i32->i64 implicit zext isn't guaranteed by BSR/BSF pass through.
+  bool hasBitScanPassThrough() const { return is64Bit(); }
+
   bool isXRaySupported() const override { return is64Bit(); }
 
   /// Use clflush if we have SSE2 or we're on x86-64 (even if we asked for
diff --git a/llvm/test/CodeGen/X86/bit_ceil.ll b/llvm/test/CodeGen/X86/bit_ceil.ll
index 823453087f6180..1f21fcac8341d5 100644
--- a/llvm/test/CodeGen/X86/bit_ceil.ll
+++ b/llvm/test/CodeGen/X86/bit_ceil.ll
@@ -10,9 +10,8 @@ define i32 @bit_ceil_i32(i32 %x) {
 ; NOBMI:       # %bb.0:
 ; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
 ; NOBMI-NEXT:    leal -1(%rdi), %eax
-; NOBMI-NEXT:    bsrl %eax, %eax
 ; NOBMI-NEXT:    movl $63, %ecx
-; NOBMI-NEXT:    cmovnel %eax, %ecx
+; NOBMI-NEXT:    bsrl %eax, %ecx
 ; NOBMI-NEXT:    xorl $31, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
@@ -47,9 +46,8 @@ define i32 @bit_ceil_i32(i32 %x) {
 define i32 @bit_ceil_i32_plus1(i32 noundef %x) {
 ; NOBMI-LABEL: bit_ceil_i32_plus1:
 ; NOBMI:       # %bb.0: # %entry
-; NOBMI-NEXT:    bsrl %edi, %eax
 ; NOBMI-NEXT:    movl $63, %ecx
-; NOBMI-NEXT:    cmovnel %eax, %ecx
+; NOBMI-NEXT:    bsrl %edi, %ecx
 ; NOBMI-NEXT:    xorl $31, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
@@ -86,9 +84,8 @@ define i64 @bit_ceil_i64(i64 %x) {
 ; NOBMI-LABEL: bit_ceil_i64:
 ; NOBMI:       # %bb.0:
 ; NOBMI-NEXT:    leaq -1(%rdi), %rax
-; NOBMI-NEXT:    bsrq %rax, %rax
 ; NOBMI-NEXT:    movl $127, %ecx
-; NOBMI-NEXT:    cmovneq %rax, %rcx
+; NOBMI-NEXT:    bsrq %rax, %rcx
 ; NOBMI-NEXT:    xorl $63, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
@@ -122,9 +119,8 @@ define i64 @bit_ceil_i64(i64 %x) {
 define i64 @bit_ceil_i64_plus1(i64 noundef %x) {
 ; NOBMI-LABEL: bit_ceil_i64_plus1:
 ; NOBMI:       # %bb.0: # %entry
-; NOBMI-NEXT:    bsrq %rdi, %rax
 ; NOBMI-NEXT:    movl $127, %ecx
-; NOBMI-NEXT:    cmovneq %rax, %rcx
+; NOBMI-NEXT:    bsrq %rdi, %rcx
 ; NOBMI-NEXT:    xorl $63, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index d9c6d7053be746..08262e4d34b269 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -227,9 +227,8 @@ define i64 @PR89533(<64 x i8> %a0) {
 ; SSE-NEXT:    orl %eax, %edx
 ; SSE-NEXT:    shlq $32, %rdx
 ; SSE-NEXT:    orq %rcx, %rdx
-; SSE-NEXT:    bsfq %rdx, %rcx
 ; SSE-NEXT:    movl $64, %eax
-; SSE-NEXT:    cmovneq %rcx, %rax
+; SSE-NEXT:    rep bsfq %rdx, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: PR89533:
@@ -255,9 +254,8 @@ define i64 @PR89533(<64 x i8> %a0) {
 ; AVX1-NEXT:    orl %eax, %edx
 ; AVX1-NEXT:    shlq $32, %rdx
 ; AVX1-NEXT:    orq %rcx, %rdx
-; AVX1-NEXT:    bsfq %rdx, %rcx
 ; AVX1-NEXT:    movl $64, %eax
-; AVX1-NEXT:    cmovneq %rcx, %rax
+; AVX1-NEXT:    rep bsfq %rdx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/ctlo.ll b/llvm/test/CodeGen/X86/ctlo.ll
index 2f4fef82f1f17a..fecb62fbc5aea6 100644
--- a/llvm/test/CodeGen/X86/ctlo.ll
+++ b/llvm/test/CodeGen/X86/ctlo.ll
@@ -44,10 +44,9 @@ define i8 @ctlo_i8(i8 %x) {
 ; X64-LABEL: ctlo_i8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    notb %dil
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    bsrl %eax, %ecx
+; X64-NEXT:    movzbl %dil, %ecx
 ; X64-NEXT:    movl $15, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %ecx, %eax
 ; X64-NEXT:    xorl $7, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
@@ -146,9 +145,8 @@ define i16 @ctlo_i16(i16 %x) {
 ; X64-LABEL: ctlo_i16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    notl %edi
-; X64-NEXT:    bsrw %di, %cx
 ; X64-NEXT:    movw $31, %ax
-; X64-NEXT:    cmovnew %cx, %ax
+; X64-NEXT:    bsrw %di, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
@@ -232,9 +230,8 @@ define i32 @ctlo_i32(i32 %x) {
 ; X64-LABEL: ctlo_i32:
 ; X64:       # %bb.0:
 ; X64-NEXT:    notl %edi
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
 ;
@@ -335,9 +332,8 @@ define i64 @ctlo_i64(i64 %x) {
 ; X64-LABEL: ctlo_i64:
 ; X64:       # %bb.0:
 ; X64-NEXT:    notq %rdi
-; X64-NEXT:    bsrq %rdi, %rcx
 ; X64-NEXT:    movl $127, %eax
-; X64-NEXT:    cmovneq %rcx, %rax
+; X64-NEXT:    bsrq %rdi, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/ctlz.ll b/llvm/test/CodeGen/X86/ctlz.ll
index 68defaff78d37d..0eabfeae853f79 100644
--- a/llvm/test/CodeGen/X86/ctlz.ll
+++ b/llvm/test/CodeGen/X86/ctlz.ll
@@ -246,10 +246,9 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 ;
 ; X64-LABEL: ctlz_i8_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    bsrl %eax, %ecx
+; X64-NEXT:    movzbl %dil, %ecx
 ; X64-NEXT:    movl $15, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %ecx, %eax
 ; X64-NEXT:    xorl $7, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
@@ -317,9 +316,8 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
 ;
 ; X64-LABEL: ctlz_i16_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrw %di, %cx
 ; X64-NEXT:    movw $31, %ax
-; X64-NEXT:    cmovnew %cx, %ax
+; X64-NEXT:    bsrw %di, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
@@ -372,9 +370,8 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
 ;
 ; X64-LABEL: ctlz_i32_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
 ;
@@ -442,9 +439,8 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ;
 ; X64-LABEL: ctlz_i64_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrq %rdi, %rcx
 ; X64-NEXT:    movl $127, %eax
-; X64-NEXT:    cmovneq %rcx, %rax
+; X64-NEXT:    bsrq %rdi, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
 ;
@@ -613,9 +609,8 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
 ;
 ; X64-LABEL: ctlz_bsr_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_bsr_zero_test:
@@ -983,10 +978,9 @@ define i8 @ctlz_xor7_i8_false(i8 %x) {
 ;
 ; X64-LABEL: ctlz_xor7_i8_false:
 ; X64:       # %bb.0:
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    bsrl %eax, %ecx
+; X64-NEXT:    movzbl %dil, %ecx
 ; X64-NEXT:    movl $15, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %ecx, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 ;
@@ -1094,9 +1088,8 @@ define i32 @ctlz_xor31_i32_false(i32 %x) {
 ;
 ; X64-LABEL: ctlz_xor31_i32_false:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_xor31_i32_false:
@@ -1239,9 +1232,8 @@ define i64 @ctlz_i32_sext(i32 %x) {
 ;
 ; X64-LABEL: ctlz_i32_sext:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i32_sext:
@@ -1302,9 +1294,8 @@ define i64 @ctlz_i32_zext(i32 %x) {
 ;
 ; X64-LABEL: ctlz_i32_zext:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsrl %edi, %ecx
 ; X64-NEXT:    movl $63, %eax
-; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    bsrl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i32_zext:
diff --git a/llvm/test/CodeGen/X86/cttz.ll b/llvm/test/CodeGen/X86/cttz.ll
index 30e5cccfb21982..e4a605a4cf7821 100644
--- a/llvm/test/CodeGen/X86/cttz.ll
+++ b/llvm/test/CodeGen/X86/cttz.ll
@@ -379,23 +379,22 @@ define i64 @cttz_i64_zero_test(i64 %n) {
 ;
 ; X86-CMOV-LABEL: cttz_i64_zero_test:
 ; X86-CMOV:       # %bb.0:
-; X86-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-CMOV-NOT:     rep
-; X86-CMOV-NEXT:    bsfl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT:    bsfl {{[0-9]+}}(%esp), %eax
 ; X86-CMOV-NEXT:    movl $32, %edx
-; X86-CMOV-NEXT:    cmovnel %ecx, %edx
+; X86-CMOV-NEXT:    cmovnel %eax, %edx
 ; X86-CMOV-NEXT:    addl $32, %edx
-; X86-CMOV-NOT:     rep
-; X86-CMOV-NEXT:    bsfl %eax, %eax
+; X86-CMOV-NEXT:    rep bsfl %ecx, %eax
+; X86-CMOV-NEXT:    testl %ecx, %ecx
 ; X86-CMOV-NEXT:    cmovel %edx, %eax
 ; X86-CMOV-NEXT:    xorl %edx, %edx
 ; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: cttz_i64_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    bsfq %rdi, %rcx
 ; X64-NEXT:    movl $64, %eax
-; X64-NEXT:    cmovneq %rcx, %rax
+; X64-NEXT:    rep bsfq %rdi, %rax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: cttz_i64_zero_test:
@@ -670,11 +669,12 @@ define i64 @cttz_i32_sext(i32 %x) {
 ; X86-NOCMOV-NEXT:    je .LBB12_1
 ; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
 ; X86-NOCMOV-NEXT:    rep bsfl %eax, %eax
-; X86-NOCMOV-NEXT:    xorl %edx, %edx
-; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:    jmp .LBB12_3
 ; X86-NOCMOV-NEXT:  .LBB12_1:
 ; X86-NOCMOV-NEXT:    movl $32, %eax
-; X86-NOCMOV-NEXT:    xorl %edx, %edx
+; X86-NOCMOV-NEXT:  .LBB12_3: # %cond.end
+; X86-NOCMOV-NEXT:    movl %eax, %edx
+; X86-NOCMOV-NEXT:    sarl $31, %edx
 ; X86-NOCMOV-NEXT:    retl
 ;
 ; X86-CMOV-LABEL: cttz_i32_sext:
@@ -691,6 +691,7 @@ define i64 @cttz_i32_sext(i32 %x) {
 ; X64-NEXT:    movabsq $4294967296, %rax # imm = 0x100000000
 ; X64-NEXT:    orq %rdi, %rax
 ; X64-NEXT:    rep bsfq %rax, %rax
+; X64-NEXT:    movl %eax, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: cttz_i32_sext:
@@ -748,6 +749,7 @@ define i64 @cttz_i32_zext(i32 %x) {
 ; X64-NEXT:    movabsq $4294967296, %rax # imm = 0x100000000
 ; X64-NEXT:    orq %rdi, %rax
 ; X64-NEXT:    rep bsfq %rax, %rax
+; X64-NEXT:    movl %eax, %eax
 ; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: cttz_i32_zext:
diff --git a/llvm/test/CodeGen/X86/dagcombine-select.ll b/llvm/test/CodeGen/X86/dagcombine-select.ll
index 1380c02663ee0e..309276ed69f189 100644
--- a/llvm/test/CodeGen/X86/dagcombine-select.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-select.ll
@@ -325,10 +325,11 @@ declare i64 @llvm.cttz.i64(i64, i1)
 define i64 @cttz_64_eq_select(i64 %v) nounwind {
 ; NOBMI-LABEL: cttz_64_eq_select:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfq %rdi, %rcx
-; NOBMI-NEXT:    movq $-1, %rax
+; NOBMI-NEXT:    rep bsfq %rdi, %rcx
+; NOBMI-NEXT:    addq $6, %rcx
+; NOBMI-NEXT:    testq %rdi, %rdi
+; NOBMI-NEXT:    movl $5, %eax
 ; NOBMI-NEXT:    cmovneq %rcx, %rax
-; NOBMI-NEXT:    addq $6, %rax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_64_eq_select:
@@ -349,10 +350,11 @@ define i64 @cttz_64_eq_select(i64 %v) nounwind {
 define i64 @cttz_64_ne_select(i64 %v) nounwind {
 ; NOBMI-LABEL: cttz_64_ne_select:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfq %rdi, %rcx
-; NOBMI-NEXT:    movq $-1, %rax
+; NOBMI-NEXT:    rep bsfq %rdi, %rcx
+; NOBMI-NEXT:    addq $6, %rcx
+; NOBMI-NEXT:    testq %rdi, %rdi
+; NOBMI-NEXT:    movl $5, %eax
 ; NOBMI-NEXT:    cmovneq %rcx, %rax
-; NOBMI-NEXT:    addq $6, %rax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_64_ne_select:
@@ -374,10 +376,11 @@ declare i32 @llvm.cttz.i32(i32, i1)
 define i32 @cttz_32_eq_select(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_eq_select:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
-; NOBMI-NEXT:    movl $-1, %eax
+; NOBMI-NEXT:    rep bsfl %edi, %ecx
+; NOBMI-NEXT:    addl $6, %ecx
+; NOBMI-NEXT:    testl %edi, %edi
+; NOBMI-NEXT:    movl $5, %eax
 ; NOBMI-NEXT:    cmovnel %ecx, %eax
-; NOBMI-NEXT:    addl $6, %eax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_32_eq_select:
@@ -398,10 +401,11 @@ define i32 @cttz_32_eq_select(i32 %v) nounwind {
 define i32 @cttz_32_ne_select(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_ne_select:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
-; NOBMI-NEXT:    movl $-1, %eax
+; NOBMI-NEXT:    rep bsfl %edi, %ecx
+; NOBMI-NEXT:    addl $6, %ecx
+; NOBMI-NEXT:    testl %edi, %edi
+; NOBMI-NEXT:    movl $5, %eax
 ; NOBMI-NEXT:    cmovnel %ecx, %eax
-; NOBMI-NEXT:    addl $6, %eax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_32_ne_select:
@@ -423,10 +427,10 @@ define i32 @cttz_32_ne_select(i32 %v) nounwind {
 define i32 @cttz_32_eq_select_ffs(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_eq_select_ffs:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
-; NOBMI-NEXT:    movl $-1, %eax
-; NOBMI-NEXT:    cmovnel %ecx, %eax
+; NOBMI-NEXT:    rep bsfl %edi, %eax
 ; NOBMI-NEXT:    incl %eax
+; NOBMI-NEXT:    testl %edi, %edi
+; NOBMI-NEXT:    cmovel %edi, %eax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_32_eq_select_ffs:
@@ -447,10 +451,10 @@ define i32 @cttz_32_eq_select_ffs(i32 %v) nounwind {
 define i32 @cttz_32_ne_select_ffs(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_ne_select_ffs:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
-; NOBMI-NEXT:    movl $-1, %eax
-; NOBMI-NEXT:    cmovnel %ecx, %eax
+; NOBMI-NEXT:    rep bsfl %edi, %eax
 ; NOBMI-NEXT:    incl %eax
+; NOBMI-NEXT:    testl %edi, %edi
+; NOBMI-NEXT:    cmovel %edi, %eax
 ; NOBMI-NEXT:    retq
 ;
 ; BMI-LABEL: cttz_32_ne_select_ffs:
@@ -472,7 +476,8 @@ define i32 @cttz_32_ne_select_ffs(i32 %v) nounwind {
 define i32 @cttz_32_eq_select_ffs_m1(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_eq_select_ffs_m1:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
+; NOBMI-NEXT:    rep bsfl %edi, %ecx
+; NOBMI-NEXT:    testl %edi, %edi
 ; NOBMI-NEXT:    movl $-1, %eax
 ; NOBMI-NEXT:    cmovnel %ecx, %eax
 ; NOBMI-NEXT:    retq
@@ -493,7 +498,8 @@ define i32 @cttz_32_eq_select_ffs_m1(i32 %v) nounwind {
 define i32 @cttz_32_ne_select_ffs_m1(i32 %v) nounwind {
 ; NOBMI-LABEL: cttz_32_ne_select_ffs_m1:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    bsfl %edi, %ecx
+; NOBMI-NEXT:    rep bsfl %edi, %ecx
+; NOBMI-NEXT:    testl %edi, %edi
 ; NOBMI-NEXT:    movl $-1, %eax
 ; NOBMI-NEXT:    cmovnel %ecx, %eax
 ; NOBMI-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/pr40090.ll b/llvm/test/CodeGen/X86/pr40090.ll
index f0aaf09e359dd3..fa0b8392b4921d 100644
--- a/llvm/test/CodeGen/X86/pr40090.ll
+++ b/llvm/test/CodeGen/X86/pr40090.ll
@@ -7,6 +7,7 @@ define i64 @foo(i64 %x, i64 %y) {
 ; CHECK-NEXT:    bsrq %rdi, %rax
 ; CHECK-NEXT:    orq $64, %rax
 ; CHECK-NEXT:    bsrq %rsi, %rcx
+; CHECK-NEXT:    testq %rsi, %rsi
 ; CHECK-NEXT:    cmoveq %rax, %rcx
 ; CHECK-NEXT:    movl $63, %eax
 ; CHECK-NEXT:    subq %rcx, %rax
diff --git a/llvm/test/CodeGen/X86/pr92569.ll b/llvm/test/CodeGen/X86/pr92569.ll
index 0fb4ed7905287c..f13e0313206d29 100644
--- a/llvm/test/CodeGen/X86/pr92569.ll
+++ b/llvm/test/CodeGen/X86/pr92569.ll
@@ -4,12 +4,11 @@
 define void @PR92569(i64 %arg, <8 x i8> %arg1) {
 ; CHECK-LABEL: PR92569:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    bsfq %rdi, %rax
-; CHECK-NEXT:    movl $64, %ecx
-; CHECK-NEXT:    cmovneq %rax, %rcx
-; CHECK-NEXT:    shrb $3, %cl
+; CHECK-NEXT:    movl $64, %eax
+; CHECK-NEXT:    rep bsfq %rdi, %rax
+; CHECK-NEXT:    shrb $3, %al
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movzbl %cl, %eax
+; CHECK-NEXT:    movzbl %al, %eax
 ; CHECK-NEXT:    andl $15, %eax
 ; CHECK-NEXT:    movzbl -24(%rsp,%rax), %eax
 ; CHECK-NEXT:    movl %eax, 0
diff --git a/llvm/test/CodeGen/X86/scheduler-backtracking.ll b/llvm/test/CodeGen/X86/scheduler-backtracking.ll
index 6be79edbe51e10..426587a84ce179 100644
--- a/llvm/test/CodeGen/X86/scheduler-backtracking.ll
+++ b/llvm/test/CodeGen/X86/scheduler-backtracking.ll
@@ -234,16 +234,15 @@ define i256 @test2(i256 %a) nounwind {
 ; ILP-NEXT:    xorq $63, %rdx
 ; ILP-NEXT:    andq %rsi, %r11
 ; ILP-NEXT:    movl $127, %esi
-; ILP-NEXT:    bsrq %r11, %r8
-; ILP-NEXT:    cmoveq %rsi, %r8
-; ILP-NEXT:    xorq $63, %r8
-; ILP-NEXT:    addq $64, %r8
+; ILP-NEXT:    bsrq %r11, %rsi
+; ILP-NEXT:    xorq $63, %rsi
+; ILP-NEXT:    addq $64, %rsi
 ; ILP-NEXT:    testq %r10, %r10
-; ILP-NEXT:    cmovneq %rdx, %r8
-; ILP-NEXT:    subq $-128, %r8
+; ILP-NEXT:    cmovneq %rdx, %rsi
+; ILP-NEXT:    subq $-128, %rsi
 ; ILP-NEXT:    orq %rdi, %r9
-; ILP-NEXT:    cmovneq %rcx, %r8
-; ILP-NEXT:    movq %r8, (%rax)
+; ILP-NEXT:    cmovneq %rcx, %rsi
+; ILP-NEXT:    movq %rsi, (%rax)
 ; ILP-NEXT:    movq $0, 8(%rax)
 ; ILP-NEXT:    retq
 ;
@@ -274,16 +273,15 @@ define i256 @test2(i256 %a) nounwind {
 ; HYBRID-NEXT:    xorq $63, %rdx
 ; HYBRID-NEXT:    andq %rsi, %r11
 ; HYBRID-NEXT:    movl $127, %esi
-; HYBRID-NEXT:    bsrq %r11, %r8
-; HYBRID-NEXT:    cmoveq %rsi, %r8
-; HYBRID-NEXT:    xorq $63, %r8
-; HYBRID-NEXT:    addq $64, %r8
+; HYBRID-NEXT:    bsrq %r11, %rsi
+; HYBRID-NEXT:    xorq $63, %rsi
+; HYBRID-NEXT:    addq $64, %rsi
 ; HYBRID-NEXT:    testq %r10, %r10
-; HYBRID-NEXT:    cmovneq %rdx, %r8
-; HYBRID-NEXT:    subq $-128, %r8
+; HYBRID-NEXT:    cmovneq %rdx, %rsi
+; HYBRID-NEXT:    subq $-128, %rsi
 ; HYBRID-NEXT:    orq %rdi, %r9
-; HYBRID-NEXT:    cmovneq %rcx, %r8
-; HYBRID-NEXT:    movq %r8, (%rax)
+; HYBRID-NEXT:    cmovneq %rcx, %rsi
+; HYBRID-NEXT:    movq %rsi, (%rax)
 ; HYBRID-NEXT:    movq $0, 8(%rax)
 ; HYBRID-NEXT:    retq
 ;
@@ -314,16 +312,15 @@ define i256 @test2(i256 %a) nounwind {
 ; BURR-NEXT:    xorq $63, %rdx
 ; BURR-NEXT:    andq %rsi, %r11
 ; BURR-NEXT:    movl $127, %esi
-; BURR-NEXT:    bsrq %r11, %r8
-; BURR-NEXT:    cmoveq %rsi, %r8
-; BURR-NEXT:    xorq $63, %r8
-; BURR-NEXT:    addq $64, %r8
+; BURR-NEXT:    bsrq %r11, %rsi
+; BURR-NEXT:    xorq $63, %rsi
+; BURR-NEXT:    addq $64, %rsi
 ; BURR-NEXT:    testq %r10, %r10
-; BURR-NEXT:    cmovneq %rdx, %r8
-; BURR-NEXT:    subq $-128, %r8
+; BURR-NEXT:    cmovneq %rdx, %rsi
+; BURR-NEXT:    subq $-128, %rsi
 ; BURR-NEXT:    orq %rdi, %r9
-; BURR-NEXT:    cmovneq %rcx, %r8
-; BURR-NEXT:    movq %r8, (%rax)
+; BURR-NEXT:    cmovneq %rcx, %rsi
+; BURR-NEXT:    movq %rsi, (%rax)
 ; BURR-NEXT:    movq $0, 8(%rax)
 ; BURR-NEXT:    retq
 ;
@@ -351,19 +348,18 @@ define i256 @test2(i256 %a) nounwind {
 ; SRC-NEXT:    cmovneq %rcx, %rdx
 ; SRC-NEXT:    bsrq %r10, %rcx
 ; SRC-NEXT:    xorq $63, %rcx
+; SRC-NEXT:    movl $127, %esi
 ; SRC-NEXT:    bsrq %r11, %rsi
-; SRC-NEXT:    movl $127, %r8d
-; SRC-NEXT:    cmovneq %rsi, %r8
-; SRC-NEXT:    xorq $63, %r8
-; SRC-NEXT:    addq $64, %r8
+; SRC-NEXT:    xorq $63, %rsi
+; SRC-NEXT:    addq $64, %rsi
 ; SRC-NEXT:    testq %r10, %r10
-; SRC-NEXT:    cmovneq %rcx, %r8
-; SRC-NEXT:    subq $-128, %r8
+; SRC-NEXT:    cmovneq %rcx, %rsi
+; SRC-NEXT:    subq $-128, %rsi
 ; SRC-NEXT:    orq %r9, %rdi
-; SRC-NEXT:    cmovneq %rdx, %r8
+; SRC-NEXT:    cmovneq %rdx, %rsi
 ; SRC-NEXT:    xorps %xmm0, %xmm0
 ; SRC-NEXT:    movaps %xmm0, 16(%rax)
-; SRC-NEXT:    movq %r8, (%rax)
+; SRC-NEXT:    movq %rsi, (%rax)
 ; SRC-NEXT:    movq $0, 8(%rax)
 ; SRC-NEXT:    retq
 ;
@@ -372,12 +368,11 @@ define i256 @test2(i256 %a) nounwind {
 ; LIN-NEXT:    movq %rdi, %rax
 ; LIN-NEXT:    xorps %xmm0, %xmm0
 ; LIN-NEXT:    movaps %xmm0, 16(%rdi)
-; LIN-NEXT:    movq %rsi, %rdi
-; LIN-NEXT:    negq %rdi
-; LIN-NEXT:    andq %rsi, %rdi
-; LIN-NEXT:    bsrq %rdi, %rsi
 ; LIN-NEXT:    movl $127, %edi
-; LIN-NEXT:    cmovneq %rsi, %rdi
+; LIN-NEXT:    movq %rsi, %r9
+; LIN-NEXT:    negq %r9
+; LIN-NEXT:    andq %rsi, %r9
+; LIN-NEXT:    bsrq %r9, %rdi
 ; LIN-NEXT:    xorq $63, %rdi
 ; LIN-NEXT:    addq $64, %rdi
 ; LIN-NEXT:    xorl %esi, %esi
@@ -415,7 +410,6 @@ define i256 @test2(i256 %a) nounwind {
 define i256 @test3(i256 %n) nounwind {
 ; ILP-LABEL: test3:
 ; ILP:       # %bb.0:
-; ILP-NEXT:    pushq %rbx
 ; ILP-NEXT:    movq %rdi, %rax
 ; ILP-NEXT:    xorps %xmm0, %xmm0
 ; ILP-NEXT:    movaps %xmm0, 16(%rdi)
@@ -429,34 +423,32 @@ define i256 @test3(i256 %n) nounwind {
 ; ILP-NEXT:    sbbq %r8, %r9
 ; ILP-NEXT:    notq %r8
 ; ILP-NEXT:    andq %r9, %r8
-; ILP-NEXT:    bsrq %r8, %rbx
+; ILP-NEXT:    bsrq %r8, %r9
 ; ILP-NEXT:    notq %rdx
 ; ILP-NEXT:    andq %r10, %rdx
-; ILP-NEXT:    bsrq %rdx, %r9
-; ILP-NEXT:    xorq $63, %rbx
+; ILP-NEXT:    bsrq %rdx, %r10
+; ILP-NEXT:    xorq $63, %r9
 ; ILP-NEXT:    notq %rcx
 ; ILP-NEXT:    andq %r11, %rcx
-; ILP-NEXT:    bsrq %rcx, %r10
+; ILP-NEXT:    bsrq %rcx, %r11
+; ILP-NEXT:    xorq $63, %r11
+; ILP-NEXT:    orq $64, %r11
+; ILP-NEXT:    testq %r8, %r8
+; ILP-NEXT:    cmovneq %r9, %r11
 ; ILP-NEXT:    xorq $63, %r10
-; ILP-NEXT:    orq $64, %r10
 ; ILP-NEXT:    notq %rsi
-; ILP-NEXT:    testq %r8, %r8
-; ILP-NEXT:    cmovneq %rbx, %r10
-; ILP-NEXT:    xorq $63, %r9
 ; ILP-NEXT:    andq %rdi, %rsi
 ; ILP-NEXT:    movl $127, %edi
-; ILP-NEXT:    bsrq %rsi, %rsi
-; ILP-NEXT:    cmoveq %rdi, %rsi
-; ILP-NEXT:    xorq $63, %rsi
-; ILP-NEXT:    addq $64, %rsi
+; ILP-NEXT:    bsrq %rsi, %rdi
+; ILP-NEXT:    xorq $63, %rdi
+; ILP-NEXT:    addq $64, %rdi
 ; ILP-NEXT:    testq %rdx, %rdx
-; ILP-NEXT:    cmovneq %r9, %rsi
-; ILP-NEXT:    subq $-128, %rsi
+; ILP-NEXT:    cmovneq %r10, %rdi
+; ILP-NEXT:    subq $-128, %rdi
 ; ILP-NEXT:    orq %r8, %rcx
-; ILP-NEXT:    cmovneq %r10, %rsi
-; ILP-NEXT:    movq %rsi, (%rax)
+; ILP-NEXT:    cmovneq %r11, %rdi
+; ILP-NEXT:    movq %rdi, (%rax)
 ; ILP-NEXT:    movq $0, 8(%rax)
-; ILP-NEXT:    popq %rbx
 ; ILP-NEXT:    retq
 ;
 ; HYBRID-LABEL: test3:
@@ -491,16 +483,15 @@ define i256 @test3(i256 %n) nounwind {
 ; HYBRID-NEXT:    notq %rsi
 ; HYBRID-NEXT:    andq %rdi, %rsi
 ; HYBRID-NEXT:    movl $127, %edi
-; HYBRID-NEXT:    bsrq %rsi, %rsi
-; HYBRID-NEXT:    cmoveq %rdi, %rsi
-; HYBRID-NEXT:    xorq $63, %rsi
-; HYBRID-NEXT:    addq $64, %rsi
+; HYBRID-NEXT:    bsrq %rsi, %rdi
+; HYBRID-NEXT:    xorq $63, %rdi
+; HYBRID-NEXT:    addq $64, %rdi
 ; HYBRID-NEXT:    testq %rdx, %rdx
-; HYBRID-NEXT:    cmovneq %r10, %rsi
-; HYBRID-NEXT:    subq $-128, %rsi
+; HYBRID-NEXT:    cmovneq %r10, %rdi
+; HYBRID-NEXT:    subq $-128, %rdi
 ; HYBRID-NEXT:    orq %r8, %rcx
-; HYBRID-NEXT:    cmovneq %r9, %rsi
-; HYBRID-NEXT:    movq %rsi, (%rax)
+; HYBRID-NEXT:    cmovneq %r9, %rdi
+; HYBRID-NEXT:    movq %rdi, (%rax)
 ; HYBRID-NEXT:    movq $0, 8(%rax)
 ; HYBRID-NEXT:    popq %rbx
 ; HYBRID-NEXT:    retq
@@ -537,16 +528,15 @@ define i256 @test3(i256 %n) nounwind {
 ; BURR-NEXT:    notq %rsi
 ; BURR-NEXT:    andq %rdi, %rsi
 ; BURR-NEXT:    movl $127, %edi
-; BURR-NEXT:    bsrq %rsi, %rsi
-; BURR-NEXT:    cmoveq %rdi, %rsi
-; BURR-NEXT:    xorq $63, %rsi
-; BURR-NEXT:    addq $64, %rsi
+; BURR-NEXT:    bsrq %rsi, %rdi
+; BURR-NEXT:    xorq $63, %rdi
+; BURR-NEXT:    addq $64, %rdi
 ; BURR-NEXT:    testq %rdx, %rdx
-; BURR-NEXT:    cmovneq %r10, %rsi
-; BURR-NEXT:    subq $-128, %rsi
+; BURR-NEXT:    cmovneq %r10, %rdi
+; BURR-NEXT:    subq $-128, %rdi
 ; BURR-NEXT:    orq %r8, %rcx
-; BURR-NEXT:    cmovneq %r9, %rsi
-; BURR-NEXT:    movq %rsi, (%rax)
+; BURR-NEXT:    cmovneq %r9, %rdi
+; BURR-NEXT:    movq %rdi, (%rax)
 ; BURR-NEXT:    movq $0, 8(%rax)
 ; BURR-NEXT:    popq %rbx
 ; BURR-NEXT:    retq
@@ -579,9 +569,8 @@ define i256 @test3(i256 %n) nounwind {
 ; SRC-NEXT:    cmovneq %rdi, %r9
 ; SRC-NEXT:    bsrq %rdx, %rdi
 ; SRC-NEXT:    xorq $63, %rdi
-; SRC-NEXT:    bsrq %rsi, %rsi
 ; SRC-NEXT:    movl $127, %r10d
-; SRC-NEXT:    cmovneq %rsi, %r10
+; SRC-NEXT:    bsrq %rsi, %r10
 ; SRC-NEXT:    xorq $63, %r10
 ; SRC-NEXT:    addq $64, %r10
 ; SRC-NEXT:    testq %rdx, %rdx
@@ -600,13 +589,12 @@ define i256 @test3(i256 %n) nounwind {
 ; LIN-NEXT:    movq %rdi, %rax
 ; LIN-NEXT:    xorps %xmm0, %xmm0
 ; LIN-NEXT:    movaps %xmm0, 16(%rdi)
+; LIN-NEXT:    movl $127, %r9d
 ; LIN-NEXT:    movq %rsi, %rdi
 ; LIN-NEXT:    negq %rdi
 ; LIN-NEXT:    notq %rsi
 ; LIN-NEXT:    andq %rdi, %rsi
-; LIN-NEXT:    bsrq %rsi, %rsi
-; LIN-NEXT:    movl $127, %r9d
-; LIN-NEXT:    cmovneq %rsi, %r9
+; LIN-NEXT:    bsrq %rsi, %r9
 ; LIN-NEXT:    xorq $63, %r9
 ; LIN-NEXT:    addq $64, %r9
 ; LIN-NEXT:    xorl %edi, %edi
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 954c05bdb20767..2ab63392c7076a 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -684,12 +684,6 @@ static const X86FoldTableEntry Table1[] = {
   {X86::BLSR64rr, X86::BLSR64rm, 0},
   {X86::BLSR64rr_EVEX, X86::BLSR64rm_EVEX, 0},
   {X86::BLSR64rr_NF, X86::BLSR64rm_NF, 0},
-  {X86::BSF16rr, X86::BSF16rm, 0},
-  {X86::BSF32rr, X86::BSF32rm, 0},
-  {X86::BSF64rr, X86::BSF64rm, 0},
-  {X86::BSR16rr, X86::BSR16rm, 0},
-  {X86::BSR32rr, X86::BSR32rm, 0},
-  {X86::BSR64rr, X86::BSR64rm, 0},
   {X86::BZHI32rr, X86::BZHI32rm, 0},
   {X86::BZHI32rr_EVEX, X86::BZHI32rm_EVEX, 0},
   {X86::BZHI32rr_NF, X86::BZHI32rm_NF, 0},
@@ -2072,6 +2066,12 @@ static const X86FoldTableEntry Table2[] = {
   {X86::BLENDPSrri, X86::BLENDPSrmi, TB_ALIGN_16},
   {X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16},
   {X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16},
+  {X86::BSF16rr, X86::BSF16rm, 0},
+  {X86::BSF32rr, X86::BSF32rm, 0},
+  {X86::BSF64rr, X86::BSF64rm, 0},
+  {X86::BSR16rr, X86::BSR16rm, 0},
+  {X86::BSR32rr, X86::BSR32rm, 0},
+  {X86::BSR64rr, X86::BSR64rm, 0},
   {X86::CMOV16rr, X86::CMOV16rm, 0},
   {X86::CMOV16rr_ND, X86::CMOV16rm_ND, 0},
   {X86::CMOV32rr, X86::CMOV32rm, 0},
diff --git a/llvm/test/tools/llvm-mca/X86/BtVer2/clear-super-register-1.s b/llvm/test/tools/llvm-mca/X86/BtVer2/clear-super-register-1.s
index 6483809deda3a9..0bd5f451e2e341 100644
--- a/llvm/test/tools/llvm-mca/X86/BtVer2/clear-super-register-1.s
+++ b/llvm/test/tools/llvm-mca/X86/BtVer2/clear-super-register-1.s
@@ -15,12 +15,12 @@ bsf   %rax, %rcx
 
 # CHECK:      Iterations:        100
 # CHECK-NEXT: Instructions:      400
-# CHECK-NEXT: Total Cycles:      655
+# CHECK-NEXT: Total Cycles:      663
 # CHECK-NEXT: Total uOps:        1000
 
 # CHECK:      Dispatch Width:    2
-# CHECK-NEXT: uOps Per Cycle:    1.53
-# CHECK-NEXT: IPC:               0.61
+# CHECK-NEXT: uOps Per Cycle:    1.51
+# CHECK-NEXT: IPC:               0.60
 # CHECK-NEXT: Block RThroughput: 5.0
 
 # CHECK:      Instruction Info:
