<!doctype html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Re: [llvm] Enhance peephole optimization (D124118)</title>
</head>
<body>
<p>Thanks!</p>
<p>Philip<br>
</p>
<div class="moz-cite-prefix">On 5/10/22 13:45, Mingming Liu wrote:<br>
</div>
<blockquote type="cite"
cite="mid:CAENFzptSWT0ZK+T=fTtvjbJ5_8NSt1XD9R7q1MoHBf6mYooZyg@mail.gmail.com">
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<div dir="ltr">Okay, working on it. Will update when it's
complete. </div>
<br>
<div class="gmail_quote">
<div dir="ltr" class="gmail_attr">On Tue, May 10, 2022 at 1:43
PM Philip Reames <<a
href="mailto:listmail@philipreames.com"
moz-do-not-send="true" class="moz-txt-link-freetext">listmail@philipreames.com</a>>
wrote:<br>
</div>
<blockquote class="gmail_quote" style="margin:0px 0px 0px
0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
<div>
<p>Revert and recommit please. The commit comment needs to
contain the information, not just the phab review.</p>
<p>Philip<br>
</p>
<div>On 5/10/22 13:27, Mingming Liu wrote:<br>
</div>
<blockquote type="cite">
<div dir="ltr">Hello Philip!
<div> A differential revision is attached in the
commit message, and has more context of the commit.</div>
<div><br>
</div>
<div> Shall I just use differential revision or still
revert and re-commit?</div>
</div>
<br>
<div class="gmail_quote">
<div dir="ltr" class="gmail_attr">On Tue, May 10, 2022
at 1:17 PM Philip Reames <<a
href="mailto:listmail@philipreames.com"
target="_blank" moz-do-not-send="true"
class="moz-txt-link-freetext">listmail@philipreames.com</a>>
wrote:<br>
</div>
<blockquote class="gmail_quote" style="margin:0px 0px
0px 0.8ex;border-left:1px solid
rgb(204,204,204);padding-left:1ex">Please revert and
reapply with a descriptive commit message.<br>
<br>
Philip<br>
<br>
On 5/10/22 12:39, Mingming Liu via llvm-commits wrote:<br>
> Author: Mingming Liu<br>
> Date: 2022-05-10T12:35:35-07:00<br>
> New Revision:
d84ca05ef7f897fdd51900ea07e3c5344632130a<br>
><br>
> URL: <a
href="https://github.com/llvm/llvm-project/commit/d84ca05ef7f897fdd51900ea07e3c5344632130a"
rel="noreferrer" target="_blank"
moz-do-not-send="true" class="moz-txt-link-freetext">https://github.com/llvm/llvm-project/commit/d84ca05ef7f897fdd51900ea07e3c5344632130a</a><br>
> DIFF: <a
href="https://github.com/llvm/llvm-project/commit/d84ca05ef7f897fdd51900ea07e3c5344632130a.diff"
rel="noreferrer" target="_blank"
moz-do-not-send="true" class="moz-txt-link-freetext">https://github.com/llvm/llvm-project/commit/d84ca05ef7f897fdd51900ea07e3c5344632130a.diff</a><br>
><br>
> LOG: Enhance peephole optimization.<br>
><br>
> Differential Revision: <a
href="https://reviews.llvm.org/D124118"
rel="noreferrer" target="_blank"
moz-do-not-send="true" class="moz-txt-link-freetext">https://reviews.llvm.org/D124118</a><br>
><br>
> Added:<br>
>
llvm/test/CodeGen/X86/peephole-test-after-add.mir<br>
><br>
> Modified:<br>
> llvm/lib/Target/X86/X86InstrInfo.cpp<br>
><br>
> Removed:<br>
> <br>
><br>
><br>
>
################################################################################<br>
> diff --git
a/llvm/lib/Target/X86/X86InstrInfo.cpp
b/llvm/lib/Target/X86/X86InstrInfo.cpp<br>
> index 0c45094b6e2f6..f4ffb42d9972d 100644<br>
> --- a/llvm/lib/Target/X86/X86InstrInfo.cpp<br>
> +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp<br>
> @@ -25,13 +25,16 @@<br>
> #include "llvm/CodeGen/MachineConstantPool.h"<br>
> #include "llvm/CodeGen/MachineDominators.h"<br>
> #include "llvm/CodeGen/MachineFrameInfo.h"<br>
> +#include "llvm/CodeGen/MachineInstr.h"<br>
> #include "llvm/CodeGen/MachineInstrBuilder.h"<br>
> #include "llvm/CodeGen/MachineModuleInfo.h"<br>
> +#include "llvm/CodeGen/MachineOperand.h"<br>
> #include "llvm/CodeGen/MachineRegisterInfo.h"<br>
> #include "llvm/CodeGen/StackMaps.h"<br>
> #include "llvm/IR/DebugInfoMetadata.h"<br>
> #include "llvm/IR/DerivedTypes.h"<br>
> #include "llvm/IR/Function.h"<br>
> +#include "llvm/IR/InstrTypes.h"<br>
> #include "llvm/MC/MCAsmInfo.h"<br>
> #include "llvm/MC/MCExpr.h"<br>
> #include "llvm/MC/MCInst.h"<br>
> @@ -964,6 +967,101 @@ inline static bool
isTruncatedShiftCountForLEA(unsigned ShAmt) {<br>
> return ShAmt < 4 && ShAmt > 0;<br>
> }<br>
> <br>
> +static bool findRedundantFlagInstr(MachineInstr
&CmpInstr,<br>
> + MachineInstr
&CmpValDefInstr,<br>
> + const
MachineRegisterInfo *MRI,<br>
> + MachineInstr
**AndInstr,<br>
> + const
TargetRegisterInfo *TRI,<br>
> + bool
&NoSignFlag, bool &ClearsOverflowFlag) {<br>
> + if (CmpValDefInstr.getOpcode() !=
X86::SUBREG_TO_REG)<br>
> + return false;<br>
> +<br>
> + if (CmpInstr.getOpcode() != X86::TEST64rr)<br>
> + return false;<br>
> +<br>
> + // CmpInstr is a TEST64rr instruction, and
`X86InstrInfo::analyzeCompare`<br>
> + // guarantees that it's analyzable only if two
registers are identical.<br>
> + assert(<br>
> + (CmpInstr.getOperand(0).getReg() ==
CmpInstr.getOperand(1).getReg()) &&<br>
> + "CmpInstr is an analyzable TEST64rr, and
`X86InstrInfo::analyzeCompare` "<br>
> + "requires two reg operands are the
same.");<br>
> +<br>
> + // Caller
(`X86InstrInfo::optimizeCompareInstr`) guarantees that<br>
> + // `CmpValDefInstr` defines the value that's
used by `CmpInstr`; in this case<br>
> + // if `CmpValDefInstr` sets the EFLAGS, it is
likely that `CmpInstr` is<br>
> + // redundant.<br>
> + assert(<br>
> +
(MRI->getVRegDef(CmpInstr.getOperand(0).getReg())
== &CmpValDefInstr) &&<br>
> + "Caller guarantees that TEST64rr is a user
of SUBREG_TO_REG.");<br>
> +<br>
> + // As seen in X86 td files,
CmpValDefInstr.getOperand(1).getImm() is typically<br>
> + // 0.<br>
> + if (CmpValDefInstr.getOperand(1).getImm() !=
0)<br>
> + return false;<br>
> +<br>
> + // As seen in X86 td files,
CmpValDefInstr.getOperand(3) is typically<br>
> + // sub_32bit or sub_xmm.<br>
> + if (CmpValDefInstr.getOperand(3).getImm() !=
X86::sub_32bit)<br>
> + return false;<br>
> +<br>
> + MachineInstr *VregDefInstr =<br>
> +
MRI->getVRegDef(CmpValDefInstr.getOperand(2).getReg());<br>
> +<br>
> + assert(VregDefInstr && "Must have a
definition (SSA)");<br>
> +<br>
> + // Requires `CmpValDefInstr` and
`VregDefInstr` are from the same MBB<br>
> + // to simplify the subsequent analysis.<br>
> + //<br>
> + // FIXME: If `VregDefInstr->getParent()` is
the only predecessor of<br>
> + // `CmpValDefInstr.getParent()`, this could be
handled.<br>
> + if (VregDefInstr->getParent() !=
CmpValDefInstr.getParent())<br>
> + return false;<br>
> +<br>
> + if (X86::isAND(VregDefInstr->getOpcode()))
{<br>
> + // Get a sequence of instructions like<br>
> + // %reg = and* ... //
Set EFLAGS<br>
> + // ... //
EFLAGS not changed<br>
> + // %extended_reg = subreg_to_reg 0, %reg,
%subreg.sub_32bit<br>
> + // test64rr %extended_reg, %extended_reg,
implicit-def $eflags<br>
> + //<br>
> + // If subsequent readers use a subset of
bits that don't change<br>
> + // after `and*` instructions, it's likely
that the test64rr could<br>
> + // be optimized away.<br>
> + for (const MachineInstr &Instr :<br>
> +
make_range(std::next(MachineBasicBlock::iterator(VregDefInstr)),<br>
> +
MachineBasicBlock::iterator(CmpValDefInstr))) {<br>
> + // There are instructions between
'VregDefInstr' and<br>
> + // 'CmpValDefInstr' that modify EFLAGS.<br>
> + if (Instr.modifiesRegister(X86::EFLAGS,
TRI))<br>
> + return false;<br>
> + }<br>
> +<br>
> + *AndInstr = VregDefInstr;<br>
> +<br>
> + // AND instruction will essentially update
SF and clear OF, so<br>
> + // NoSignFlag should be false in the sense
that SF is modified by `AND`.<br>
> + //<br>
> + // However, the implementation artificially
sets `NoSignFlag` to true<br>
> + // to poison the SF bit; that is to say, if
SF is looked at later, the<br>
> + // optimization (to erase TEST64rr) will be
disabled.<br>
> + //<br>
> + // The reason to poison SF bit is that SF
bit value could be different<br>
> + // in the `AND` and `TEST` operation; signed
bit is not known for `AND`,<br>
> + // and is known to be 0 as a result of
`TEST64rr`.<br>
> + //<br>
> + // FIXME: As opposed to poisoning the SF bit
directly, consider peeking into
> + // the AND instruction and using the static
information to guide peephole optimization if
possible.<br>
> + // For example, it's possible to fold a
conditional move into a copy<br>
> + // if the relevant EFLAG bits could be
deduced from an immediate operand of and operation.<br>
> + //<br>
> + NoSignFlag = true;<br>
> + // ClearsOverflowFlag is true for AND
operation (no surprise).<br>
> + ClearsOverflowFlag = true;<br>
> + return true;<br>
> + }<br>
> + return false;<br>
> +}<br>
> +<br>
> bool X86InstrInfo::classifyLEAReg(MachineInstr
&MI, const MachineOperand &Src,<br>
> unsigned Opc,
bool AllowSP, Register &NewSrc,<br>
> bool
&isKill, MachineOperand &ImplicitOp,<br>
> @@ -4226,6 +4324,23 @@ bool
X86InstrInfo::optimizeCompareInstr(MachineInstr
&CmpInstr, Register SrcReg,<br>
> MI = &Inst;<br>
> break;<br>
> }<br>
> +<br>
> + // Look back for the following pattern,
in which case the test64rr<br>
> + // instruction could be erased.<br>
> + //<br>
> + // Example:<br>
> + // %reg = and32ri %in_reg, 5<br>
> + // ... //
EFLAGS not changed.<br>
> + // %src_reg = subreg_to_reg 0, %reg,
%subreg.sub_index<br>
> + // test64rr %src_reg, %src_reg,
implicit-def $eflags<br>
> + MachineInstr *AndInstr = nullptr;<br>
> + if (IsCmpZero &&<br>
> + findRedundantFlagInstr(CmpInstr,
Inst, MRI, &AndInstr, TRI,<br>
> + NoSignFlag,
ClearsOverflowFlag)) {<br>
> + assert(AndInstr != nullptr &&
X86::isAND(AndInstr->getOpcode()));<br>
> + MI = AndInstr;<br>
> + break;<br>
> + }<br>
> // Cannot find other candidates before
definition of SrcReg.<br>
> return false;<br>
> }<br>
><br>
> diff --git
a/llvm/test/CodeGen/X86/peephole-test-after-add.mir
b/llvm/test/CodeGen/X86/peephole-test-after-add.mir<br>
> new file mode 100644<br>
> index 0000000000000..65a90fafa655c<br>
> --- /dev/null<br>
> +++
b/llvm/test/CodeGen/X86/peephole-test-after-add.mir<br>
> @@ -0,0 +1,196 @@<br>
> +# NOTE: Assertions have been autogenerated by
utils/update_mir_test_checks.py<br>
> +# RUN: llc -o - %s
-mtriple=x86_64-unknown-linux-gnu
--run-pass=peephole-opt | FileCheck %s<br>
> +<br>
> +# Test that TEST64rr is erased in `test_erased`,
and kept in `test_not_erased_when_sf_used`<br>
> +# and `test_not_erased_when_eflags_change`.<br>
> +<br>
> +--- |<br>
> + ; ModuleID = 'tmp.ll'<br>
> + source_filename = "tmp.ll"<br>
> + target datalayout =
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"<br>
> +<br>
> + define i64 @test_erased(ptr %0, i64 %1, i64
%2) {<br>
> + %4 = load i64, ptr %0, align 8<br>
> + %5 = and i64 %4, 3<br>
> + %6 = icmp eq i64 %5, 0<br>
> + %7 = select i1 %6, i64 %1, i64 %5<br>
> + store i64 %7, ptr %0, align 8<br>
> + ret i64 %5<br>
> + }<br>
> +<br>
> + define i64 @test_not_erased_when_sf_used(ptr
%0, i64 %1, i64 %2, i64 %3) {<br>
> + %5 = load i64, ptr %0, align 8<br>
> + %6 = and i64 %5, 3<br>
> + %7 = icmp slt i64 %6, 0<br>
> + %8 = select i1 %7, i64 %1, i64 %6<br>
> + store i64 %8, ptr %0, align 8<br>
> + ret i64 %5<br>
> + }<br>
> +<br>
> + define void
@test_not_erased_when_eflags_change(ptr %0, i64 %1,
i64 %2, i64 %3, ptr %4) {<br>
> + %6 = load i64, ptr %0, align 8<br>
> + %7 = and i64 %6, 3<br>
> + %8 = xor i64 %3, 5<br>
> + %9 = icmp eq i64 %7, 0<br>
> + %10 = select i1 %9, i64 %1, i64 %7<br>
> + store i64 %10, ptr %0, align 8<br>
> + store i64 %8, ptr %4, align 8<br>
> + ret void<br>
> + }<br>
> +<br>
> +...<br>
> +---<br>
> +name: test_erased<br>
> +alignment: 16<br>
> +tracksDebugUserValues: false<br>
> +registers:<br>
> + - { id: 0, class: gr64, preferred-register: ''
}<br>
> + - { id: 1, class: gr64, preferred-register: ''
}<br>
> + - { id: 2, class: gr64, preferred-register: ''
}<br>
> + - { id: 3, class: gr64, preferred-register: ''
}<br>
> + - { id: 4, class: gr32, preferred-register: ''
}<br>
> + - { id: 5, class: gr32, preferred-register: ''
}<br>
> + - { id: 6, class: gr64, preferred-register: ''
}<br>
> + - { id: 7, class: gr64, preferred-register: ''
}<br>
> +liveins:<br>
> + - { reg: '$rdi', virtual-reg: '%0' }<br>
> + - { reg: '$rsi', virtual-reg: '%1' }<br>
> +frameInfo:<br>
> + maxAlignment: 1<br>
> +machineFunctionInfo: {}<br>
> +body: |<br>
> + bb.0 (%ir-block.3):<br>
> + liveins: $rdi, $rsi<br>
> +<br>
> + ; CHECK-LABEL: name: test_erased<br>
> + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi<br>
> + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY
$rdi<br>
> + ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 =
MOV64rm [[COPY1]], 1, $noreg, 0, $noreg :: (load (s64)
from %ir.0)<br>
> + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr32 = COPY
[[MOV64rm]].sub_32bit<br>
> + ; CHECK-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 =
AND32ri8 [[COPY2]], 3, implicit-def $eflags<br>
> + ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64
= SUBREG_TO_REG 0, killed [[AND32ri8_]],
%subreg.sub_32bit<br>
> + ; CHECK-NEXT: [[CMOV64rr:%[0-9]+]]:gr64 =
CMOV64rr [[SUBREG_TO_REG]], [[COPY]], 4, implicit
$eflags<br>
> + ; CHECK-NEXT: MOV64mr [[COPY1]], 1, $noreg,
0, $noreg, killed [[CMOV64rr]] :: (store (s64) into
%ir.0)<br>
> + ; CHECK-NEXT: $rax = COPY [[SUBREG_TO_REG]]<br>
> + ; CHECK-NEXT: RET 0, $rax<br>
> + %1:gr64 = COPY $rsi<br>
> + %0:gr64 = COPY $rdi<br>
> + %3:gr64 = MOV64rm %0, 1, $noreg, 0, $noreg
:: (load (s64) from %ir.0)<br>
> + %4:gr32 = COPY %3.sub_32bit<br>
> + %5:gr32 = AND32ri8 %4, 3, implicit-def dead
$eflags<br>
> + %6:gr64 = SUBREG_TO_REG 0, killed %5,
%subreg.sub_32bit<br>
> + TEST64rr %6, %6, implicit-def $eflags<br>
> + %7:gr64 = CMOV64rr %6, %1, 4, implicit
$eflags<br>
> + MOV64mr %0, 1, $noreg, 0, $noreg, killed %7
:: (store (s64) into %ir.0)<br>
> + $rax = COPY %6<br>
> + RET 0, $rax<br>
> +<br>
> +...<br>
> +---<br>
> +name: test_not_erased_when_sf_used<br>
> +alignment: 16<br>
> +tracksDebugUserValues: false<br>
> +registers:<br>
> + - { id: 0, class: gr64, preferred-register: ''
}<br>
> + - { id: 1, class: gr64, preferred-register: ''
}<br>
> + - { id: 2, class: gr64, preferred-register: ''
}<br>
> + - { id: 3, class: gr64, preferred-register: ''
}<br>
> + - { id: 4, class: gr64, preferred-register: ''
}<br>
> + - { id: 5, class: gr32, preferred-register: ''
}<br>
> + - { id: 6, class: gr32, preferred-register: ''
}<br>
> + - { id: 7, class: gr64, preferred-register: ''
}<br>
> + - { id: 8, class: gr64, preferred-register: ''
}<br>
> +liveins:<br>
> + - { reg: '$rdi', virtual-reg: '%0' }<br>
> + - { reg: '$rsi', virtual-reg: '%1' }<br>
> +frameInfo:<br>
> + maxAlignment: 1<br>
> +machineFunctionInfo: {}<br>
> +body: |<br>
> + bb.0 (%ir-block.4):<br>
> + liveins: $rdi, $rsi<br>
> +<br>
> + ; CHECK-LABEL: name:
test_not_erased_when_sf_used<br>
> + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rsi<br>
> + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY
$rdi<br>
> + ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 =
MOV64rm [[COPY1]], 1, $noreg, 0, $noreg :: (load (s64)
from %ir.0)<br>
> + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr32 = COPY
[[MOV64rm]].sub_32bit<br>
> + ; CHECK-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 =
AND32ri8 [[COPY2]], 3, implicit-def dead $eflags<br>
> + ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64
= SUBREG_TO_REG 0, killed [[AND32ri8_]],
%subreg.sub_32bit<br>
> + ; CHECK-NEXT: TEST64rr [[SUBREG_TO_REG]],
[[SUBREG_TO_REG]], implicit-def $eflags<br>
> + ; CHECK-NEXT: [[CMOV64rr:%[0-9]+]]:gr64 =
CMOV64rr [[SUBREG_TO_REG]], [[COPY]], 8, implicit
$eflags<br>
> + ; CHECK-NEXT: MOV64mr [[COPY1]], 1, $noreg,
0, $noreg, killed [[CMOV64rr]] :: (store (s64) into
%ir.0)<br>
> + ; CHECK-NEXT: $rax = COPY [[MOV64rm]]<br>
> + ; CHECK-NEXT: RET 0, $rax<br>
> + %1:gr64 = COPY $rsi<br>
> + %0:gr64 = COPY $rdi<br>
> + %4:gr64 = MOV64rm %0, 1, $noreg, 0, $noreg
:: (load (s64) from %ir.0)<br>
> + %5:gr32 = COPY %4.sub_32bit<br>
> + %6:gr32 = AND32ri8 %5, 3, implicit-def dead
$eflags<br>
> + %7:gr64 = SUBREG_TO_REG 0, killed %6,
%subreg.sub_32bit<br>
> + TEST64rr %7, %7, implicit-def $eflags<br>
> + %8:gr64 = CMOV64rr %7, %1, 8, implicit
$eflags<br>
> + MOV64mr %0, 1, $noreg, 0, $noreg, killed %8
:: (store (s64) into %ir.0)<br>
> + $rax = COPY %4<br>
> + RET 0, $rax<br>
> +<br>
> +...<br>
> +---<br>
> +name:
test_not_erased_when_eflags_change<br>
> +alignment: 16<br>
> +tracksDebugUserValues: false<br>
> +registers:<br>
> + - { id: 0, class: gr64, preferred-register: ''
}<br>
> + - { id: 1, class: gr64, preferred-register: ''
}<br>
> + - { id: 2, class: gr64, preferred-register: ''
}<br>
> + - { id: 3, class: gr64, preferred-register: ''
}<br>
> + - { id: 4, class: gr64, preferred-register: ''
}<br>
> + - { id: 5, class: gr64, preferred-register: ''
}<br>
> + - { id: 6, class: gr32, preferred-register: ''
}<br>
> + - { id: 7, class: gr32, preferred-register: ''
}<br>
> + - { id: 8, class: gr64, preferred-register: ''
}<br>
> + - { id: 9, class: gr64, preferred-register: ''
}<br>
> + - { id: 10, class: gr64, preferred-register:
'' }<br>
> +liveins:<br>
> + - { reg: '$rdi', virtual-reg: '%0' }<br>
> + - { reg: '$rsi', virtual-reg: '%1' }<br>
> + - { reg: '$rcx', virtual-reg: '%3' }<br>
> + - { reg: '$r8', virtual-reg: '%4' }<br>
> +frameInfo:<br>
> + maxAlignment: 1<br>
> +machineFunctionInfo: {}<br>
> +body: |<br>
> + bb.0 (%ir-block.5):<br>
> + liveins: $rdi, $rsi, $rcx, $r8<br>
> +<br>
> + ; CHECK-LABEL: name:
test_not_erased_when_eflags_change<br>
> + ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $r8<br>
> + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY
$rcx<br>
> + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64 = COPY
$rsi<br>
> + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY
$rdi<br>
> + ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 =
MOV64rm [[COPY3]], 1, $noreg, 0, $noreg :: (load (s64)
from %ir.0)<br>
> + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr32 = COPY
[[MOV64rm]].sub_32bit<br>
> + ; CHECK-NEXT: [[AND32ri8_:%[0-9]+]]:gr32 =
AND32ri8 [[COPY4]], 3, implicit-def dead $eflags<br>
> + ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64
= SUBREG_TO_REG 0, killed [[AND32ri8_]],
%subreg.sub_32bit<br>
> + ; CHECK-NEXT: [[XOR64ri8_:%[0-9]+]]:gr64 =
XOR64ri8 [[COPY1]], 5, implicit-def dead $eflags<br>
> + ; CHECK-NEXT: TEST64rr [[SUBREG_TO_REG]],
[[SUBREG_TO_REG]], implicit-def $eflags<br>
> + ; CHECK-NEXT: [[CMOV64rr:%[0-9]+]]:gr64 =
CMOV64rr [[SUBREG_TO_REG]], [[COPY2]], 4, implicit
$eflags<br>
> + ; CHECK-NEXT: MOV64mr [[COPY3]], 1, $noreg,
0, $noreg, killed [[CMOV64rr]] :: (store (s64) into
%ir.0)<br>
> + ; CHECK-NEXT: MOV64mr [[COPY]], 1, $noreg,
0, $noreg, killed [[XOR64ri8_]] :: (store (s64) into
%ir.4)<br>
> + ; CHECK-NEXT: RET 0<br>
> + %4:gr64 = COPY $r8<br>
> + %3:gr64 = COPY $rcx<br>
> + %1:gr64 = COPY $rsi<br>
> + %0:gr64 = COPY $rdi<br>
> + %5:gr64 = MOV64rm %0, 1, $noreg, 0, $noreg
:: (load (s64) from %ir.0)<br>
> + %6:gr32 = COPY %5.sub_32bit<br>
> + %7:gr32 = AND32ri8 %6, 3, implicit-def dead
$eflags<br>
> + %8:gr64 = SUBREG_TO_REG 0, killed %7,
%subreg.sub_32bit<br>
> + %9:gr64 = XOR64ri8 %3, 5, implicit-def dead
$eflags<br>
> + TEST64rr %8, %8, implicit-def $eflags<br>
> + %10:gr64 = CMOV64rr %8, %1, 4, implicit
$eflags<br>
> + MOV64mr %0, 1, $noreg, 0, $noreg, killed %10
:: (store (s64) into %ir.0)<br>
> + MOV64mr %4, 1, $noreg, 0, $noreg, killed %9
:: (store (s64) into %ir.4)<br>
> + RET 0<br>
> +<br>
> +...<br>
><br>
><br>
> <br>
> _______________________________________________<br>
> llvm-commits mailing list<br>
> <a href="mailto:llvm-commits@lists.llvm.org"
target="_blank" moz-do-not-send="true"
class="moz-txt-link-freetext">llvm-commits@lists.llvm.org</a><br>
> <a
href="https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits"
rel="noreferrer" target="_blank"
moz-do-not-send="true" class="moz-txt-link-freetext">https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits</a><br>
</blockquote>
</div>
<br clear="all">
<div><br>
</div>
-- <br>
<div dir="ltr">
<div dir="ltr">
<div>
<div dir="ltr">
<div>
<div><font size="2" face="sans-serif"
color="#555555">Thanks,</font></div>
<div><font size="2" face="sans-serif"
color="#555555">Mingming</font></div>
</div>
</div>
</div>
</div>
</div>
</blockquote>
</div>
</blockquote>
</div>
<br clear="all">
<div><br>
</div>
-- <br>
<div dir="ltr" class="gmail_signature">
<div dir="ltr">
<div>
<div dir="ltr">
<div>
<div><font size="2" face="sans-serif" color="#555555">Thanks,</font></div>
<div><font size="2" face="sans-serif" color="#555555">Mingming</font></div>
</div>
</div>
</div>
</div>
</div>
</blockquote>
</body>
</html>