[PATCH] R600/SI: Allow commuting compares
Tom Stellard
tom at stellard.net
Mon Mar 23 11:19:16 PDT 2015
On Fri, Mar 20, 2015 at 06:49:08PM -0700, Matt Arsenault wrote:
> From c009909e1f183228b6a3ed183eb514719ef14cb5 Mon Sep 17 00:00:00 2001
> From: Matt Arsenault <Matthew.Arsenault at amd.com>
> Date: Fri, 12 Dec 2014 12:14:09 -0500
> Subject: [PATCH 1/5] R600/SI: Allow commuting compares
>
> This enables very common cases to switch to the
> smaller encoding.
>
> All of the standard LLVM canonicalizations of comparisons
> are the opposite of what we want. Compares with constants
> are moved to the RHS, but with the 32-bit VOPC encoding only
> the first operand can be an inline immediate, literal
> constant, or SGPR.
>
> There are additional bad canonicalizations that should
> also be fixed, such as canonicalizing le x, k to lt x, (k + 1)
> when this makes the new constant no longer an inline immediate value.
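
For reference, the legality of the swap itself is just the usual
predicate flip (lt <-> gt, le <-> ge, eq/ne unchanged), and the win is
that the constant can then sit in src0, the only VOPC e32 operand that
accepts an SGPR, inline immediate, or literal. A small standalone C++
sanity check of that reasoning (not LLVM code; the -16..64 integer
inline-immediate range below is my recollection of the SI rules):

#include <cassert>

// Rough model of the SI integer inline-immediate range (assumption).
static bool isInlineImm(int k) { return k >= -16 && k <= 64; }

int main() {
  for (int x = -100; x <= 100; ++x) {
    const int k = 64;                // still an inline immediate
    assert((x < k) == (k > x));      // lt <-> gt
    assert((x <= k) == (k >= x));    // le <-> ge
    assert((x == k) == (k == x));    // eq is symmetric
  }
  assert(isInlineImm(64) && !isInlineImm(65)); // 65 needs a 32-bit literal
  return 0;
}
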
> ---
> lib/Target/R600/SIInstrInfo.cpp | 26 +-
> lib/Target/R600/SIInstrInfo.h | 6 +-
> lib/Target/R600/SIInstrInfo.td | 98 +++--
> lib/Target/R600/SIInstructions.td | 138 +++---
> test/CodeGen/R600/commute-compares.ll | 697 ++++++++++++++++++++++++++++++
> test/CodeGen/R600/fceil64.ll | 6 +-
> test/CodeGen/R600/ffloor.f64.ll | 6 +-
> test/CodeGen/R600/ftrunc.f64.ll | 4 +-
> test/CodeGen/R600/i1-copy-phi.ll | 2 +-
> test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll | 18 +-
> test/CodeGen/R600/llvm.round.f64.ll | 2 +-
> test/CodeGen/R600/llvm.round.ll | 2 +-
> test/CodeGen/R600/or.ll | 2 +-
> test/CodeGen/R600/setcc-opt.ll | 18 +-
> test/CodeGen/R600/sgpr-control-flow.ll | 6 +-
> test/CodeGen/R600/trunc-cmp-constant.ll | 14 +-
> test/CodeGen/R600/trunc.ll | 8 +-
> test/CodeGen/R600/valu-i1.ll | 16 +-
> test/CodeGen/R600/xor.ll | 4 +-
> 19 files changed, 919 insertions(+), 154 deletions(-)
> create mode 100644 test/CodeGen/R600/commute-compares.ll
>
> diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
> index 95334c3..0c5acb8 100644
> --- a/lib/Target/R600/SIInstrInfo.cpp
> +++ b/lib/Target/R600/SIInstrInfo.cpp
> @@ -418,7 +418,29 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
> }
> }
>
> -unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
> +static unsigned commuteCompareOpcode(unsigned Opcode) {
> + // For compares, < is arbitrarily treated as the "original" and > as the
> + // "reverse".
> +
> + int NewOpc;
> +
> + NewOpc = AMDGPU::getCommuteCmpRev(Opcode);
> + if (NewOpc != -1)
> + return NewOpc;
> +
> + NewOpc = AMDGPU::getCommuteCmpOrig(Opcode);
> + if (NewOpc != -1)
> + return NewOpc;
> +
> + return Opcode;
> +}
> +
> +unsigned SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
> + const unsigned Opcode = MI.getOpcode();
> +
> + if (MI.isCompare())
> + return commuteCompareOpcode(Opcode);
> +
> int NewOpc;
>
> // Try to map original to commuted opcode
> @@ -789,7 +811,7 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
> }
>
> if (MI)
> - MI->setDesc(get(commuteOpcode(MI->getOpcode())));
> + MI->setDesc(get(commuteOpcode(*MI)));
>
> return MI;
> }
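
The getCommuteCmpRev/getCommuteCmpOrig calls above resolve against
TableGen-generated tables (defined further down in SIInstrInfo.td).
Conceptually they are just a bidirectional pairing of each "<"-flavored
compare with its ">"-flavored counterpart, with -1 meaning "no entry",
which is what commuteCompareOpcode() checks for. A minimal stand-in in
plain C++ (placeholder names, not the generated code):

#include <cstdint>

enum FakeOpc : uint16_t { CMP_LT_F32, CMP_GT_F32, CMP_LE_F32, CMP_GE_F32 };

struct CmpPair { uint16_t Orig, Rev; };
static const CmpPair CommuteCmpTable[] = {
  {CMP_LT_F32, CMP_GT_F32},
  {CMP_LE_F32, CMP_GE_F32},
};

// Analogue of getCommuteCmpRev: original -> reverse, or -1 on a miss.
static int fakeGetCommuteCmpRev(uint16_t Opc) {
  for (const CmpPair &P : CommuteCmpTable)
    if (P.Orig == Opc)
      return P.Rev;
  return -1;
}

// Analogue of getCommuteCmpOrig: reverse -> original, or -1 on a miss.
static int fakeGetCommuteCmpOrig(uint16_t Opc) {
  for (const CmpPair &P : CommuteCmpTable)
    if (P.Rev == Opc)
      return P.Orig;
  return -1;
}

int main() {
  return (fakeGetCommuteCmpRev(CMP_LT_F32) == CMP_GT_F32 &&
          fakeGetCommuteCmpOrig(CMP_GT_F32) == CMP_LT_F32 &&
          fakeGetCommuteCmpRev(CMP_GT_F32) == -1) ? 0 : 1;
}
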
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index 3a0d63b..6214008 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -114,7 +114,7 @@ public:
> // register. If there is no hardware instruction that can store to \p
> // DstRC, then AMDGPU::COPY is returned.
> unsigned getMovOpcode(const TargetRegisterClass *DstRC) const;
> - unsigned commuteOpcode(unsigned Opcode) const;
> + unsigned commuteOpcode(const MachineInstr &MI) const;
>
> MachineInstr *commuteInstruction(MachineInstr *MI,
> bool NewMI = false) const override;
> @@ -349,6 +349,10 @@ namespace AMDGPU {
> int getVOPe32(uint16_t Opcode);
> int getCommuteRev(uint16_t Opcode);
> int getCommuteOrig(uint16_t Opcode);
> +
> + int getCommuteCmpRev(uint16_t Opcode);
> + int getCommuteCmpOrig(uint16_t Opcode);
> +
> int getAddr64Inst(uint16_t Opcode);
> int getAtomicRetOp(uint16_t Opcode);
> int getAtomicNoRetOp(uint16_t Opcode);
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index 31c6ab1..62e877e 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -829,6 +829,11 @@ class VOP2_REV <string revOp, bit isOrig> {
> bit IsOrig = isOrig;
> }
>
> +class VOPC_REV <string revOp, bit isOrig> {
> + string RevOp = revOp;
> + bit IsOrig = isOrig;
> +}
> +
> class AtomicNoRet <string noRetOp, bit isRet> {
> string NoRetOp = noRetOp;
> bit IsRet = isRet;
> @@ -1057,9 +1062,10 @@ multiclass VOP3b_3_m <vop op, dag outs, dag ins, string asm,
>
> multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
> list<dag> pattern, string opName,
> - bit HasMods, bit defExec> {
> + bit HasMods, bit defExec, string revOp> {
>
> - def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
> + def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
> + VOPC_REV<revOp#"_e64", !eq(revOp, opName)>;
>
> def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
> VOP3DisableFields<1, 0, HasMods> {
> @@ -1245,7 +1251,7 @@ class VOPC_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
> }
>
> multiclass VOPC_m <vopc op, dag outs, dag ins, string asm, list<dag> pattern,
> - string opName, bit DefExec> {
> + string opName, bit DefExec, string revOpName = ""> {
> def "" : VOPC_Pseudo <outs, ins, pattern, opName>;
>
> def _si : VOPC<op.SI, ins, asm, []>,
> @@ -1262,11 +1268,11 @@ multiclass VOPC_m <vopc op, dag outs, dag ins, string asm, list<dag> pattern,
> multiclass VOPC_Helper <vopc op, string opName,
> dag ins32, string asm32, list<dag> pat32,
> dag out64, dag ins64, string asm64, list<dag> pat64,
> - bit HasMods, bit DefExec> {
> + bit HasMods, bit DefExec, string revOp> {
> defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
>
> defm _e64 : VOP3_C_m <op, out64, ins64, opName#asm64, pat64,
> - opName, HasMods, DefExec>;
> + opName, HasMods, DefExec, revOp>;
> }
>
> // Special case for class instructions which only have modifiers on
> @@ -1274,16 +1280,17 @@ multiclass VOPC_Helper <vopc op, string opName,
> multiclass VOPC_Class_Helper <vopc op, string opName,
> dag ins32, string asm32, list<dag> pat32,
> dag out64, dag ins64, string asm64, list<dag> pat64,
> - bit HasMods, bit DefExec> {
> + bit HasMods, bit DefExec, string revOp> {
> defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
>
> defm _e64 : VOP3_C_m <op, out64, ins64, opName#asm64, pat64,
> - opName, HasMods, DefExec>,
> + opName, HasMods, DefExec, revOp>,
> VOP3DisableModFields<1, 0, 0>;
> }
>
> multiclass VOPCInst <vopc op, string opName,
> VOPProfile P, PatLeaf cond = COND_NULL,
> + string revOp = opName,
> bit DefExec = 0> : VOPC_Helper <
> op, opName,
> P.Ins32, P.Asm32, [],
> @@ -1295,7 +1302,7 @@ multiclass VOPCInst <vopc op, string opName,
> (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
> cond))],
> [(set i1:$dst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
> - P.HasModifiers, DefExec
> + P.HasModifiers, DefExec, revOp
> >;
>
> multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
> @@ -1307,38 +1314,39 @@ multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
> [(set i1:$dst,
> (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
> [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
> - P.HasModifiers, DefExec
> + P.HasModifiers, DefExec, opName
> >;
>
>
> -multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL> :
> - VOPCInst <op, opName, VOP_F32_F32_F32, cond>;
> +multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
> + VOPCInst <op, opName, VOP_F32_F32_F32, cond, revOp>;
>
> -multiclass VOPC_F64 <vopc op, string opName, PatLeaf cond = COND_NULL> :
> - VOPCInst <op, opName, VOP_F64_F64_F64, cond>;
> +multiclass VOPC_F64 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
> + VOPCInst <op, opName, VOP_F64_F64_F64, cond, revOp>;
>
> -multiclass VOPC_I32 <vopc op, string opName, PatLeaf cond = COND_NULL> :
> - VOPCInst <op, opName, VOP_I32_I32_I32, cond>;
> +multiclass VOPC_I32 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
> + VOPCInst <op, opName, VOP_I32_I32_I32, cond, revOp>;
>
> -multiclass VOPC_I64 <vopc op, string opName, PatLeaf cond = COND_NULL> :
> - VOPCInst <op, opName, VOP_I64_I64_I64, cond>;
> +multiclass VOPC_I64 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
> + VOPCInst <op, opName, VOP_I64_I64_I64, cond, revOp>;
>
>
> multiclass VOPCX <vopc op, string opName, VOPProfile P,
> - PatLeaf cond = COND_NULL>
> - : VOPCInst <op, opName, P, cond, 1>;
> + PatLeaf cond = COND_NULL,
> + string revOp = "">
> + : VOPCInst <op, opName, P, cond, revOp, 1>;
>
> -multiclass VOPCX_F32 <vopc op, string opName> :
> - VOPCX <op, opName, VOP_F32_F32_F32, COND_NULL>;
> +multiclass VOPCX_F32 <vopc op, string opName, string revOp = opName> :
> + VOPCX <op, opName, VOP_F32_F32_F32, COND_NULL, revOp>;
>
> -multiclass VOPCX_F64 <vopc op, string opName> :
> - VOPCX <op, opName, VOP_F64_F64_F64, COND_NULL>;
> +multiclass VOPCX_F64 <vopc op, string opName, string revOp = opName> :
> + VOPCX <op, opName, VOP_F64_F64_F64, COND_NULL, revOp>;
>
> -multiclass VOPCX_I32 <vopc op, string opName> :
> - VOPCX <op, opName, VOP_I32_I32_I32, COND_NULL>;
> +multiclass VOPCX_I32 <vopc op, string opName, string revOp = opName> :
> + VOPCX <op, opName, VOP_I32_I32_I32, COND_NULL, revOp>;
>
> -multiclass VOPCX_I64 <vopc op, string opName> :
> - VOPCX <op, opName, VOP_I64_I64_I64, COND_NULL>;
> +multiclass VOPCX_I64 <vopc op, string opName, string revOp = opName> :
> + VOPCX <op, opName, VOP_I64_I64_I64, COND_NULL, revOp>;
>
> multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
> list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
> @@ -2197,15 +2205,6 @@ def getVOPe32 : InstrMapping {
> let ValueCols = [["4"]];
> }
>
> -// Maps an original opcode to its commuted version
> -def getCommuteRev : InstrMapping {
> - let FilterClass = "VOP2_REV";
> - let RowFields = ["RevOp"];
> - let ColFields = ["IsOrig"];
> - let KeyCol = ["1"];
> - let ValueCols = [["0"]];
> -}
> -
> def getMaskedMIMGOp : InstrMapping {
> let FilterClass = "MIMG_Mask";
> let RowFields = ["Op"];
> @@ -2223,6 +2222,33 @@ def getCommuteOrig : InstrMapping {
> let ValueCols = [["1"]];
> }
>
> +// Maps an original opcode to its commuted version
> +def getCommuteRev : InstrMapping {
> + let FilterClass = "VOP2_REV";
> + let RowFields = ["RevOp"];
> + let ColFields = ["IsOrig"];
> + let KeyCol = ["1"];
> + let ValueCols = [["0"]];
> +}
> +
> +def getCommuteCmpOrig : InstrMapping {
> + let FilterClass = "VOPC_REV";
> + let RowFields = ["RevOp"];
> + let ColFields = ["IsOrig"];
> + let KeyCol = ["0"];
> + let ValueCols = [["1"]];
> +}
> +
> +// Maps an original opcode to its commuted version
> +def getCommuteCmpRev : InstrMapping {
> + let FilterClass = "VOPC_REV";
> + let RowFields = ["RevOp"];
> + let ColFields = ["IsOrig"];
> + let KeyCol = ["1"];
> + let ValueCols = [["0"]];
> +}
> +
> +
> def getMCOpcodeGen : InstrMapping {
> let FilterClass = "SIMCInstr";
> let RowFields = ["PseudoInstr"];
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index a175d86..95a0a0b 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -509,29 +509,31 @@ def S_TTRACEDATA : SOPP <0x00000016, (ins), "s_ttracedata"> {
>
> let isCompare = 1 in {
>
> +let isCommutable = 1 in {
> defm V_CMP_F_F32 : VOPC_F32 <vopc<0x0, 0x40>, "v_cmp_f_f32">;
> -defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1, 0x41>, "v_cmp_lt_f32", COND_OLT>;
> +defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1, 0x41>, "v_cmp_lt_f32", COND_OLT, "v_cmp_gt_f32">;
> defm V_CMP_EQ_F32 : VOPC_F32 <vopc<0x2, 0x42>, "v_cmp_eq_f32", COND_OEQ>;
> -defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3, 0x43>, "v_cmp_le_f32", COND_OLE>;
> +defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3, 0x43>, "v_cmp_le_f32", COND_OLE, "v_cmp_ge_f32">;
> defm V_CMP_GT_F32 : VOPC_F32 <vopc<0x4, 0x44>, "v_cmp_gt_f32", COND_OGT>;
> defm V_CMP_LG_F32 : VOPC_F32 <vopc<0x5, 0x45>, "v_cmp_lg_f32", COND_ONE>;
> defm V_CMP_GE_F32 : VOPC_F32 <vopc<0x6, 0x46>, "v_cmp_ge_f32", COND_OGE>;
> defm V_CMP_O_F32 : VOPC_F32 <vopc<0x7, 0x47>, "v_cmp_o_f32", COND_O>;
> defm V_CMP_U_F32 : VOPC_F32 <vopc<0x8, 0x48>, "v_cmp_u_f32", COND_UO>;
> -defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9, 0x49>, "v_cmp_nge_f32", COND_ULT>;
> +defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9, 0x49>, "v_cmp_nge_f32", COND_ULT, "v_cmp_nle_f32">;
> defm V_CMP_NLG_F32 : VOPC_F32 <vopc<0xa, 0x4a>, "v_cmp_nlg_f32", COND_UEQ>;
> -defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb, 0x4b>, "v_cmp_ngt_f32", COND_ULE>;
> +defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb, 0x4b>, "v_cmp_ngt_f32", COND_ULE, "v_cmp_nlt_f32">;
> defm V_CMP_NLE_F32 : VOPC_F32 <vopc<0xc, 0x4c>, "v_cmp_nle_f32", COND_UGT>;
> defm V_CMP_NEQ_F32 : VOPC_F32 <vopc<0xd, 0x4d>, "v_cmp_neq_f32", COND_UNE>;
> defm V_CMP_NLT_F32 : VOPC_F32 <vopc<0xe, 0x4e>, "v_cmp_nlt_f32", COND_UGE>;
> defm V_CMP_TRU_F32 : VOPC_F32 <vopc<0xf, 0x4f>, "v_cmp_tru_f32">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_F32 : VOPCX_F32 <vopc<0x10, 0x50>, "v_cmpx_f_f32">;
> -defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11, 0x51>, "v_cmpx_lt_f32">;
> +defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11, 0x51>, "v_cmpx_lt_f32", "v_cmpx_gt_f32">;
> defm V_CMPX_EQ_F32 : VOPCX_F32 <vopc<0x12, 0x52>, "v_cmpx_eq_f32">;
> -defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13, 0x53>, "v_cmpx_le_f32">;
> +defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13, 0x53>, "v_cmpx_le_f32", "v_cmpx_ge_f32">;
> defm V_CMPX_GT_F32 : VOPCX_F32 <vopc<0x14, 0x54>, "v_cmpx_gt_f32">;
> defm V_CMPX_LG_F32 : VOPCX_F32 <vopc<0x15, 0x55>, "v_cmpx_lg_f32">;
> defm V_CMPX_GE_F32 : VOPCX_F32 <vopc<0x16, 0x56>, "v_cmpx_ge_f32">;
> @@ -545,141 +547,149 @@ defm V_CMPX_NEQ_F32 : VOPCX_F32 <vopc<0x1d, 0x5d>, "v_cmpx_neq_f32">;
> defm V_CMPX_NLT_F32 : VOPCX_F32 <vopc<0x1e, 0x5e>, "v_cmpx_nlt_f32">;
> defm V_CMPX_TRU_F32 : VOPCX_F32 <vopc<0x1f, 0x5f>, "v_cmpx_tru_f32">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> +let isCommutable = 1 in {
> defm V_CMP_F_F64 : VOPC_F64 <vopc<0x20, 0x60>, "v_cmp_f_f64">;
> -defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21, 0x61>, "v_cmp_lt_f64", COND_OLT>;
> +defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21, 0x61>, "v_cmp_lt_f64", COND_OLT, "v_cmp_gt_f64">;
> defm V_CMP_EQ_F64 : VOPC_F64 <vopc<0x22, 0x62>, "v_cmp_eq_f64", COND_OEQ>;
> -defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23, 0x63>, "v_cmp_le_f64", COND_OLE>;
> +defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23, 0x63>, "v_cmp_le_f64", COND_OLE, "v_cmp_ge_f64">;
> defm V_CMP_GT_F64 : VOPC_F64 <vopc<0x24, 0x64>, "v_cmp_gt_f64", COND_OGT>;
> defm V_CMP_LG_F64 : VOPC_F64 <vopc<0x25, 0x65>, "v_cmp_lg_f64", COND_ONE>;
> defm V_CMP_GE_F64 : VOPC_F64 <vopc<0x26, 0x66>, "v_cmp_ge_f64", COND_OGE>;
> defm V_CMP_O_F64 : VOPC_F64 <vopc<0x27, 0x67>, "v_cmp_o_f64", COND_O>;
> defm V_CMP_U_F64 : VOPC_F64 <vopc<0x28, 0x68>, "v_cmp_u_f64", COND_UO>;
> -defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29, 0x69>, "v_cmp_nge_f64", COND_ULT>;
> +defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29, 0x69>, "v_cmp_nge_f64", COND_ULT, "v_cmp_nle_f64">;
> defm V_CMP_NLG_F64 : VOPC_F64 <vopc<0x2a, 0x6a>, "v_cmp_nlg_f64", COND_UEQ>;
> -defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b, 0x6b>, "v_cmp_ngt_f64", COND_ULE>;
> +defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b, 0x6b>, "v_cmp_ngt_f64", COND_ULE, "v_cmp_nlt_f64">;
> defm V_CMP_NLE_F64 : VOPC_F64 <vopc<0x2c, 0x6c>, "v_cmp_nle_f64", COND_UGT>;
> defm V_CMP_NEQ_F64 : VOPC_F64 <vopc<0x2d, 0x6d>, "v_cmp_neq_f64", COND_UNE>;
> defm V_CMP_NLT_F64 : VOPC_F64 <vopc<0x2e, 0x6e>, "v_cmp_nlt_f64", COND_UGE>;
> defm V_CMP_TRU_F64 : VOPC_F64 <vopc<0x2f, 0x6f>, "v_cmp_tru_f64">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_F64 : VOPCX_F64 <vopc<0x30, 0x70>, "v_cmpx_f_f64">;
> -defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31, 0x71>, "v_cmpx_lt_f64">;
> +defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31, 0x71>, "v_cmpx_lt_f64", "v_cmpx_gt_f64">;
> defm V_CMPX_EQ_F64 : VOPCX_F64 <vopc<0x32, 0x72>, "v_cmpx_eq_f64">;
> -defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33, 0x73>, "v_cmpx_le_f64">;
> +defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33, 0x73>, "v_cmpx_le_f64", "v_cmpx_ge_f64">;
> defm V_CMPX_GT_F64 : VOPCX_F64 <vopc<0x34, 0x74>, "v_cmpx_gt_f64">;
> defm V_CMPX_LG_F64 : VOPCX_F64 <vopc<0x35, 0x75>, "v_cmpx_lg_f64">;
> defm V_CMPX_GE_F64 : VOPCX_F64 <vopc<0x36, 0x76>, "v_cmpx_ge_f64">;
> defm V_CMPX_O_F64 : VOPCX_F64 <vopc<0x37, 0x77>, "v_cmpx_o_f64">;
> defm V_CMPX_U_F64 : VOPCX_F64 <vopc<0x38, 0x78>, "v_cmpx_u_f64">;
> -defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39, 0x79>, "v_cmpx_nge_f64">;
> +defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39, 0x79>, "v_cmpx_nge_f64", "v_cmpx_nle_f64">;
> defm V_CMPX_NLG_F64 : VOPCX_F64 <vopc<0x3a, 0x7a>, "v_cmpx_nlg_f64">;
> -defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b, 0x7b>, "v_cmpx_ngt_f64">;
> +defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b, 0x7b>, "v_cmpx_ngt_f64", "v_cmpx_nlt_f64">;
> defm V_CMPX_NLE_F64 : VOPCX_F64 <vopc<0x3c, 0x7c>, "v_cmpx_nle_f64">;
> defm V_CMPX_NEQ_F64 : VOPCX_F64 <vopc<0x3d, 0x7d>, "v_cmpx_neq_f64">;
> defm V_CMPX_NLT_F64 : VOPCX_F64 <vopc<0x3e, 0x7e>, "v_cmpx_nlt_f64">;
> defm V_CMPX_TRU_F64 : VOPCX_F64 <vopc<0x3f, 0x7f>, "v_cmpx_tru_f64">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> let SubtargetPredicate = isSICI in {
>
> +let isCommutable = 1 in {
> defm V_CMPS_F_F32 : VOPC_F32 <vopc<0x40>, "v_cmps_f_f32">;
> -defm V_CMPS_LT_F32 : VOPC_F32 <vopc<0x41>, "v_cmps_lt_f32">;
> +defm V_CMPS_LT_F32 : VOPC_F32 <vopc<0x41>, "v_cmps_lt_f32", COND_NULL, "v_cmps_gt_f32">;
> defm V_CMPS_EQ_F32 : VOPC_F32 <vopc<0x42>, "v_cmps_eq_f32">;
> -defm V_CMPS_LE_F32 : VOPC_F32 <vopc<0x43>, "v_cmps_le_f32">;
> +defm V_CMPS_LE_F32 : VOPC_F32 <vopc<0x43>, "v_cmps_le_f32", COND_NULL, "v_cmps_ge_f32">;
> defm V_CMPS_GT_F32 : VOPC_F32 <vopc<0x44>, "v_cmps_gt_f32">;
> defm V_CMPS_LG_F32 : VOPC_F32 <vopc<0x45>, "v_cmps_lg_f32">;
> defm V_CMPS_GE_F32 : VOPC_F32 <vopc<0x46>, "v_cmps_ge_f32">;
> defm V_CMPS_O_F32 : VOPC_F32 <vopc<0x47>, "v_cmps_o_f32">;
> defm V_CMPS_U_F32 : VOPC_F32 <vopc<0x48>, "v_cmps_u_f32">;
> -defm V_CMPS_NGE_F32 : VOPC_F32 <vopc<0x49>, "v_cmps_nge_f32">;
> +defm V_CMPS_NGE_F32 : VOPC_F32 <vopc<0x49>, "v_cmps_nge_f32", COND_NULL, "v_cmps_nle_f32">;
> defm V_CMPS_NLG_F32 : VOPC_F32 <vopc<0x4a>, "v_cmps_nlg_f32">;
> -defm V_CMPS_NGT_F32 : VOPC_F32 <vopc<0x4b>, "v_cmps_ngt_f32">;
> +defm V_CMPS_NGT_F32 : VOPC_F32 <vopc<0x4b>, "v_cmps_ngt_f32", COND_NULL, "v_cmps_nlt_f32">;
> defm V_CMPS_NLE_F32 : VOPC_F32 <vopc<0x4c>, "v_cmps_nle_f32">;
> defm V_CMPS_NEQ_F32 : VOPC_F32 <vopc<0x4d>, "v_cmps_neq_f32">;
> defm V_CMPS_NLT_F32 : VOPC_F32 <vopc<0x4e>, "v_cmps_nlt_f32">;
> defm V_CMPS_TRU_F32 : VOPC_F32 <vopc<0x4f>, "v_cmps_tru_f32">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPSX_F_F32 : VOPCX_F32 <vopc<0x50>, "v_cmpsx_f_f32">;
> -defm V_CMPSX_LT_F32 : VOPCX_F32 <vopc<0x51>, "v_cmpsx_lt_f32">;
> +defm V_CMPSX_LT_F32 : VOPCX_F32 <vopc<0x51>, "v_cmpsx_lt_f32", "v_cmpsx_gt_f32">;
> defm V_CMPSX_EQ_F32 : VOPCX_F32 <vopc<0x52>, "v_cmpsx_eq_f32">;
> -defm V_CMPSX_LE_F32 : VOPCX_F32 <vopc<0x53>, "v_cmpsx_le_f32">;
> +defm V_CMPSX_LE_F32 : VOPCX_F32 <vopc<0x53>, "v_cmpsx_le_f32", "v_cmpsx_ge_f32">;
> defm V_CMPSX_GT_F32 : VOPCX_F32 <vopc<0x54>, "v_cmpsx_gt_f32">;
> defm V_CMPSX_LG_F32 : VOPCX_F32 <vopc<0x55>, "v_cmpsx_lg_f32">;
> defm V_CMPSX_GE_F32 : VOPCX_F32 <vopc<0x56>, "v_cmpsx_ge_f32">;
> defm V_CMPSX_O_F32 : VOPCX_F32 <vopc<0x57>, "v_cmpsx_o_f32">;
> defm V_CMPSX_U_F32 : VOPCX_F32 <vopc<0x58>, "v_cmpsx_u_f32">;
> -defm V_CMPSX_NGE_F32 : VOPCX_F32 <vopc<0x59>, "v_cmpsx_nge_f32">;
> +defm V_CMPSX_NGE_F32 : VOPCX_F32 <vopc<0x59>, "v_cmpsx_nge_f32", "v_cmpsx_nle_f32">;
> defm V_CMPSX_NLG_F32 : VOPCX_F32 <vopc<0x5a>, "v_cmpsx_nlg_f32">;
> -defm V_CMPSX_NGT_F32 : VOPCX_F32 <vopc<0x5b>, "v_cmpsx_ngt_f32">;
> +defm V_CMPSX_NGT_F32 : VOPCX_F32 <vopc<0x5b>, "v_cmpsx_ngt_f32", "v_cmpsx_nlt_f32">;
> defm V_CMPSX_NLE_F32 : VOPCX_F32 <vopc<0x5c>, "v_cmpsx_nle_f32">;
> defm V_CMPSX_NEQ_F32 : VOPCX_F32 <vopc<0x5d>, "v_cmpsx_neq_f32">;
> defm V_CMPSX_NLT_F32 : VOPCX_F32 <vopc<0x5e>, "v_cmpsx_nlt_f32">;
> defm V_CMPSX_TRU_F32 : VOPCX_F32 <vopc<0x5f>, "v_cmpsx_tru_f32">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> +let isCommutable = 1 in {
> defm V_CMPS_F_F64 : VOPC_F64 <vopc<0x60>, "v_cmps_f_f64">;
> -defm V_CMPS_LT_F64 : VOPC_F64 <vopc<0x61>, "v_cmps_lt_f64">;
> +defm V_CMPS_LT_F64 : VOPC_F64 <vopc<0x61>, "v_cmps_lt_f64", COND_NULL, "v_cmps_gt_f64">;
> defm V_CMPS_EQ_F64 : VOPC_F64 <vopc<0x62>, "v_cmps_eq_f64">;
> -defm V_CMPS_LE_F64 : VOPC_F64 <vopc<0x63>, "v_cmps_le_f64">;
> +defm V_CMPS_LE_F64 : VOPC_F64 <vopc<0x63>, "v_cmps_le_f64", COND_NULL, "v_cmps_ge_f64">;
> defm V_CMPS_GT_F64 : VOPC_F64 <vopc<0x64>, "v_cmps_gt_f64">;
> defm V_CMPS_LG_F64 : VOPC_F64 <vopc<0x65>, "v_cmps_lg_f64">;
> defm V_CMPS_GE_F64 : VOPC_F64 <vopc<0x66>, "v_cmps_ge_f64">;
> defm V_CMPS_O_F64 : VOPC_F64 <vopc<0x67>, "v_cmps_o_f64">;
> defm V_CMPS_U_F64 : VOPC_F64 <vopc<0x68>, "v_cmps_u_f64">;
> -defm V_CMPS_NGE_F64 : VOPC_F64 <vopc<0x69>, "v_cmps_nge_f64">;
> +defm V_CMPS_NGE_F64 : VOPC_F64 <vopc<0x69>, "v_cmps_nge_f64", COND_NULL, "v_cmps_nle_f64">;
> defm V_CMPS_NLG_F64 : VOPC_F64 <vopc<0x6a>, "v_cmps_nlg_f64">;
> -defm V_CMPS_NGT_F64 : VOPC_F64 <vopc<0x6b>, "v_cmps_ngt_f64">;
> +defm V_CMPS_NGT_F64 : VOPC_F64 <vopc<0x6b>, "v_cmps_ngt_f64", COND_NULL, "v_cmps_nlt_f64">;
> defm V_CMPS_NLE_F64 : VOPC_F64 <vopc<0x6c>, "v_cmps_nle_f64">;
> defm V_CMPS_NEQ_F64 : VOPC_F64 <vopc<0x6d>, "v_cmps_neq_f64">;
> defm V_CMPS_NLT_F64 : VOPC_F64 <vopc<0x6e>, "v_cmps_nlt_f64">;
> defm V_CMPS_TRU_F64 : VOPC_F64 <vopc<0x6f>, "v_cmps_tru_f64">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPSX_F_F64 : VOPCX_F64 <vopc<0x70>, "v_cmpsx_f_f64">;
> -defm V_CMPSX_LT_F64 : VOPCX_F64 <vopc<0x71>, "v_cmpsx_lt_f64">;
> +defm V_CMPSX_LT_F64 : VOPCX_F64 <vopc<0x71>, "v_cmpsx_lt_f64", "v_cmpsx_gt_f64">;
> defm V_CMPSX_EQ_F64 : VOPCX_F64 <vopc<0x72>, "v_cmpsx_eq_f64">;
> -defm V_CMPSX_LE_F64 : VOPCX_F64 <vopc<0x73>, "v_cmpsx_le_f64">;
> +defm V_CMPSX_LE_F64 : VOPCX_F64 <vopc<0x73>, "v_cmpsx_le_f64", "v_cmpsx_ge_f64">;
> defm V_CMPSX_GT_F64 : VOPCX_F64 <vopc<0x74>, "v_cmpsx_gt_f64">;
> defm V_CMPSX_LG_F64 : VOPCX_F64 <vopc<0x75>, "v_cmpsx_lg_f64">;
> defm V_CMPSX_GE_F64 : VOPCX_F64 <vopc<0x76>, "v_cmpsx_ge_f64">;
> defm V_CMPSX_O_F64 : VOPCX_F64 <vopc<0x77>, "v_cmpsx_o_f64">;
> defm V_CMPSX_U_F64 : VOPCX_F64 <vopc<0x78>, "v_cmpsx_u_f64">;
> -defm V_CMPSX_NGE_F64 : VOPCX_F64 <vopc<0x79>, "v_cmpsx_nge_f64">;
> +defm V_CMPSX_NGE_F64 : VOPCX_F64 <vopc<0x79>, "v_cmpsx_nge_f64", "v_cmpsx_nle_f64">;
> defm V_CMPSX_NLG_F64 : VOPCX_F64 <vopc<0x7a>, "v_cmpsx_nlg_f64">;
> -defm V_CMPSX_NGT_F64 : VOPCX_F64 <vopc<0x7b>, "v_cmpsx_ngt_f64">;
> +defm V_CMPSX_NGT_F64 : VOPCX_F64 <vopc<0x7b>, "v_cmpsx_ngt_f64", "v_cmpsx_nlt_f64">;
> defm V_CMPSX_NLE_F64 : VOPCX_F64 <vopc<0x7c>, "v_cmpsx_nle_f64">;
> defm V_CMPSX_NEQ_F64 : VOPCX_F64 <vopc<0x7d>, "v_cmpsx_neq_f64">;
> defm V_CMPSX_NLT_F64 : VOPCX_F64 <vopc<0x7e>, "v_cmpsx_nlt_f64">;
> defm V_CMPSX_TRU_F64 : VOPCX_F64 <vopc<0x7f>, "v_cmpsx_tru_f64">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> } // End SubtargetPredicate = isSICI
>
> +let isCommutable = 1 in {
> defm V_CMP_F_I32 : VOPC_I32 <vopc<0x80, 0xc0>, "v_cmp_f_i32">;
> -defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81, 0xc1>, "v_cmp_lt_i32", COND_SLT>;
> +defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81, 0xc1>, "v_cmp_lt_i32", COND_SLT, "v_cmp_gt_i32">;
> defm V_CMP_EQ_I32 : VOPC_I32 <vopc<0x82, 0xc2>, "v_cmp_eq_i32", COND_EQ>;
> -defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83, 0xc3>, "v_cmp_le_i32", COND_SLE>;
> +defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83, 0xc3>, "v_cmp_le_i32", COND_SLE, "v_cmp_ge_i32">;
> defm V_CMP_GT_I32 : VOPC_I32 <vopc<0x84, 0xc4>, "v_cmp_gt_i32", COND_SGT>;
> defm V_CMP_NE_I32 : VOPC_I32 <vopc<0x85, 0xc5>, "v_cmp_ne_i32", COND_NE>;
> defm V_CMP_GE_I32 : VOPC_I32 <vopc<0x86, 0xc6>, "v_cmp_ge_i32", COND_SGE>;
> defm V_CMP_T_I32 : VOPC_I32 <vopc<0x87, 0xc7>, "v_cmp_t_i32">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_I32 : VOPCX_I32 <vopc<0x90, 0xd0>, "v_cmpx_f_i32">;
> -defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91, 0xd1>, "v_cmpx_lt_i32">;
> +defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91, 0xd1>, "v_cmpx_lt_i32", "v_cmpx_gt_i32">;
> defm V_CMPX_EQ_I32 : VOPCX_I32 <vopc<0x92, 0xd2>, "v_cmpx_eq_i32">;
> -defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93, 0xd3>, "v_cmpx_le_i32">;
> +defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93, 0xd3>, "v_cmpx_le_i32", "v_cmpx_ge_i32">;
> defm V_CMPX_GT_I32 : VOPCX_I32 <vopc<0x94, 0xd4>, "v_cmpx_gt_i32">;
> defm V_CMPX_NE_I32 : VOPCX_I32 <vopc<0x95, 0xd5>, "v_cmpx_ne_i32">;
> defm V_CMPX_GE_I32 : VOPCX_I32 <vopc<0x96, 0xd6>, "v_cmpx_ge_i32">;
> @@ -687,71 +697,77 @@ defm V_CMPX_T_I32 : VOPCX_I32 <vopc<0x97, 0xd7>, "v_cmpx_t_i32">;
>
> } // End hasSideEffects = 1
>
> +let isCommutable = 1 in {
> defm V_CMP_F_I64 : VOPC_I64 <vopc<0xa0, 0xe0>, "v_cmp_f_i64">;
> -defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1, 0xe1>, "v_cmp_lt_i64", COND_SLT>;
> +defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1, 0xe1>, "v_cmp_lt_i64", COND_SLT, "v_cmp_gt_i64">;
> defm V_CMP_EQ_I64 : VOPC_I64 <vopc<0xa2, 0xe2>, "v_cmp_eq_i64", COND_EQ>;
> -defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3, 0xe3>, "v_cmp_le_i64", COND_SLE>;
> +defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3, 0xe3>, "v_cmp_le_i64", COND_SLE, "v_cmp_ge_i64">;
> defm V_CMP_GT_I64 : VOPC_I64 <vopc<0xa4, 0xe4>, "v_cmp_gt_i64", COND_SGT>;
> defm V_CMP_NE_I64 : VOPC_I64 <vopc<0xa5, 0xe5>, "v_cmp_ne_i64", COND_NE>;
> defm V_CMP_GE_I64 : VOPC_I64 <vopc<0xa6, 0xe6>, "v_cmp_ge_i64", COND_SGE>;
> defm V_CMP_T_I64 : VOPC_I64 <vopc<0xa7, 0xe7>, "v_cmp_t_i64">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_I64 : VOPCX_I64 <vopc<0xb0, 0xf0>, "v_cmpx_f_i64">;
> -defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1, 0xf1>, "v_cmpx_lt_i64">;
> +defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1, 0xf1>, "v_cmpx_lt_i64", "v_cmpx_gt_i64">;
> defm V_CMPX_EQ_I64 : VOPCX_I64 <vopc<0xb2, 0xf2>, "v_cmpx_eq_i64">;
> -defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3, 0xf3>, "v_cmpx_le_i64">;
> +defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3, 0xf3>, "v_cmpx_le_i64", "v_cmpx_ge_i64">;
> defm V_CMPX_GT_I64 : VOPCX_I64 <vopc<0xb4, 0xf4>, "v_cmpx_gt_i64">;
> defm V_CMPX_NE_I64 : VOPCX_I64 <vopc<0xb5, 0xf5>, "v_cmpx_ne_i64">;
> defm V_CMPX_GE_I64 : VOPCX_I64 <vopc<0xb6, 0xf6>, "v_cmpx_ge_i64">;
> defm V_CMPX_T_I64 : VOPCX_I64 <vopc<0xb7, 0xf7>, "v_cmpx_t_i64">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> +let isCommutable = 1 in {
> defm V_CMP_F_U32 : VOPC_I32 <vopc<0xc0, 0xc8>, "v_cmp_f_u32">;
> -defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1, 0xc9>, "v_cmp_lt_u32", COND_ULT>;
> +defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1, 0xc9>, "v_cmp_lt_u32", COND_ULT, "v_cmp_gt_u32">;
> defm V_CMP_EQ_U32 : VOPC_I32 <vopc<0xc2, 0xca>, "v_cmp_eq_u32", COND_EQ>;
> -defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3, 0xcb>, "v_cmp_le_u32", COND_ULE>;
> +defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3, 0xcb>, "v_cmp_le_u32", COND_ULE, "v_cmg_ge_u32">;

Typo: v_cmg_ge_u32 -> v_cmp_ge_u32

With that fixed, the 7 patches in this branch LGTM; go ahead and commit:
https://github.com/arsenm/llvm/commits/commute-compares

-Tom

> defm V_CMP_GT_U32 : VOPC_I32 <vopc<0xc4, 0xcc>, "v_cmp_gt_u32", COND_UGT>;
> defm V_CMP_NE_U32 : VOPC_I32 <vopc<0xc5, 0xcd>, "v_cmp_ne_u32", COND_NE>;
> defm V_CMP_GE_U32 : VOPC_I32 <vopc<0xc6, 0xce>, "v_cmp_ge_u32", COND_UGE>;
> defm V_CMP_T_U32 : VOPC_I32 <vopc<0xc7, 0xcf>, "v_cmp_t_u32">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_U32 : VOPCX_I32 <vopc<0xd0, 0xd8>, "v_cmpx_f_u32">;
> -defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1, 0xd9>, "v_cmpx_lt_u32">;
> +defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1, 0xd9>, "v_cmpx_lt_u32", "v_cmpx_gt_u32">;
> defm V_CMPX_EQ_U32 : VOPCX_I32 <vopc<0xd2, 0xda>, "v_cmpx_eq_u32">;
> -defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3, 0xdb>, "v_cmpx_le_u32">;
> +defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3, 0xdb>, "v_cmpx_le_u32", "v_cmpx_le_u32">;
> defm V_CMPX_GT_U32 : VOPCX_I32 <vopc<0xd4, 0xdc>, "v_cmpx_gt_u32">;
> defm V_CMPX_NE_U32 : VOPCX_I32 <vopc<0xd5, 0xdd>, "v_cmpx_ne_u32">;
> defm V_CMPX_GE_U32 : VOPCX_I32 <vopc<0xd6, 0xde>, "v_cmpx_ge_u32">;
> defm V_CMPX_T_U32 : VOPCX_I32 <vopc<0xd7, 0xdf>, "v_cmpx_t_u32">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> +let isCommutable = 1 in {
> defm V_CMP_F_U64 : VOPC_I64 <vopc<0xe0, 0xe8>, "v_cmp_f_u64">;
> -defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1, 0xe9>, "v_cmp_lt_u64", COND_ULT>;
> +defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1, 0xe9>, "v_cmp_lt_u64", COND_ULT, "v_cmp_gt_u64">;
> defm V_CMP_EQ_U64 : VOPC_I64 <vopc<0xe2, 0xea>, "v_cmp_eq_u64", COND_EQ>;
> -defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3, 0xeb>, "v_cmp_le_u64", COND_ULE>;
> +defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3, 0xeb>, "v_cmp_le_u64", COND_ULE, "v_cmp_ge_u64">;
> defm V_CMP_GT_U64 : VOPC_I64 <vopc<0xe4, 0xec>, "v_cmp_gt_u64", COND_UGT>;
> defm V_CMP_NE_U64 : VOPC_I64 <vopc<0xe5, 0xed>, "v_cmp_ne_u64", COND_NE>;
> defm V_CMP_GE_U64 : VOPC_I64 <vopc<0xe6, 0xee>, "v_cmp_ge_u64", COND_UGE>;
> defm V_CMP_T_U64 : VOPC_I64 <vopc<0xe7, 0xef>, "v_cmp_t_u64">;
> +} // End isCommutable = 1
>
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, isCommutable = 1 in {
>
> defm V_CMPX_F_U64 : VOPCX_I64 <vopc<0xf0, 0xf8>, "v_cmpx_f_u64">;
> -defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1, 0xf9>, "v_cmpx_lt_u64">;
> +defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1, 0xf9>, "v_cmpx_lt_u64", "v_cmpx_gt_u64">;
> defm V_CMPX_EQ_U64 : VOPCX_I64 <vopc<0xf2, 0xfa>, "v_cmpx_eq_u64">;
> -defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3, 0xfb>, "v_cmpx_le_u64">;
> +defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3, 0xfb>, "v_cmpx_le_u64", "v_cmpx_ge_u64">;
> defm V_CMPX_GT_U64 : VOPCX_I64 <vopc<0xf4, 0xfc>, "v_cmpx_gt_u64">;
> defm V_CMPX_NE_U64 : VOPCX_I64 <vopc<0xf5, 0xfd>, "v_cmpx_ne_u64">;
> defm V_CMPX_GE_U64 : VOPCX_I64 <vopc<0xf6, 0xfe>, "v_cmpx_ge_u64">;
> defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7, 0xff>, "v_cmpx_t_u64">;
>
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, isCommutable = 1
>
> defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
>
> diff --git a/test/CodeGen/R600/commute-compares.ll b/test/CodeGen/R600/commute-compares.ll
> new file mode 100644
> index 0000000..3176604
> --- /dev/null
> +++ b/test/CodeGen/R600/commute-compares.ll
> @@ -0,0 +1,697 @@
> +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
> +
> +declare i32 @llvm.r600.read.tidig.x() #0
> +
> +; --------------------------------------------------------------------------------
> +; i32 compares
> +; --------------------------------------------------------------------------------
> +
> +; GCN-LABEL: {{^}}commute_eq_64_i32:
> +; GCN: v_cmp_eq_i32_e32 vcc, 64, v{{[0-9]+}}
> +define void @commute_eq_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp eq i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ne_64_i32:
> +; GCN: v_cmp_ne_i32_e32 vcc, 64, v{{[0-9]+}}
> +define void @commute_ne_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ne i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; FIXME: Why isn't this being folded as a constant?
> +; GCN-LABEL: {{^}}commute_ne_litk_i32:
> +; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3039
> +; GCN: v_cmp_ne_i32_e32 vcc, [[K]], v{{[0-9]+}}
> +define void @commute_ne_litk_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ne i32 %val, 12345
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ugt_64_i32:
> +; GCN: v_cmp_lt_u32_e32 vcc, 64, v{{[0-9]+}}
> +define void @commute_ugt_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ugt i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uge_64_i32:
> +; GCN: v_cmp_lt_u32_e32 vcc, 63, v{{[0-9]+}}
> +define void @commute_uge_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp uge i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ult_64_i32:
> +; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}}
> +define void @commute_ult_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ult i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ule_63_i32:
> +; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}}
> +define void @commute_ule_63_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ule i32 %val, 63
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; FIXME: Undo canonicalization to gt (x + 1) since it doesn't use the inline imm
> +
> +; GCN-LABEL: {{^}}commute_ule_64_i32:
> +; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x41{{$}}
> +; GCN: v_cmp_gt_u32_e32 vcc, [[K]], v{{[0-9]+}}
> +define void @commute_ule_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp ule i32 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
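
The FIXME here, together with commute_uge_64_i32 earlier in the file,
shows the boundary the commit message is talking about: uge 64 becomes
a compare against 63, which is still an inline immediate, while ule 64
becomes a compare against 65 (0x41), which is not, hence the extra
v_mov of the literal. A quick standalone check of the arithmetic
(plain C++; the -16..64 inline range is my assumption):

#include <cassert>
#include <cstdint>

static bool isInlineImm(int64_t K) { return K >= -16 && K <= 64; }

int main() {
  for (uint32_t x = 0; x < 200; ++x) {
    assert((x >= 64u) == (x > 63u)); // uge 64 -> ugt 63: constant stays inline
    assert((x <= 64u) == (x < 65u)); // ule 64 -> ult 65: constant goes literal
  }
  assert(isInlineImm(63) && !isInlineImm(65));
  return 0;
}
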
> +
> +; GCN-LABEL: {{^}}commute_sgt_neg1_i32:
> +; GCN: v_cmp_lt_i32_e32 vcc, -1, v{{[0-9]+}}
> +define void @commute_sgt_neg1_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp sgt i32 %val, -1
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_sge_neg2_i32:
> +; GCN: v_cmp_lt_i32_e32 vcc, -3, v{{[0-9]+}}
> +define void @commute_sge_neg2_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp sge i32 %val, -2
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_slt_neg16_i32:
> +; GCN: v_cmp_gt_i32_e32 vcc, -16, v{{[0-9]+}}
> +define void @commute_slt_neg16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp slt i32 %val, -16
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_sle_5_i32:
> +; GCN: v_cmp_gt_i32_e32 vcc, 6, v{{[0-9]+}}
> +define void @commute_sle_5_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i32, i32 addrspace(1)* %gep.in
> + %cmp = icmp sle i32 %val, 5
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; --------------------------------------------------------------------------------
> +; i64 compares
> +; --------------------------------------------------------------------------------
> +
> +; GCN-LABEL: {{^}}commute_eq_64_i64:
> +; GCN: v_cmp_eq_i64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_eq_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp eq i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ne_64_i64:
> +; GCN: v_cmp_ne_i64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ne_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp ne i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ugt_64_i64:
> +; GCN: v_cmp_lt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ugt_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp ugt i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uge_64_i64:
> +; GCN: v_cmp_lt_u64_e32 vcc, 63, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_uge_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp uge i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ult_64_i64:
> +; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ult_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp ult i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ule_63_i64:
> +; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ule_63_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp ule i64 %val, 63
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; FIXME: Undo canonicalization to gt (x + 1) since it doesn't use the inline imm
> +
> +; GCN-LABEL: {{^}}commute_ule_64_i64:
> +; GCN-DAG: s_movk_i32 s[[KLO:[0-9]+]], 0x41{{$}}
> +; GCN: v_cmp_gt_u64_e32 vcc, s{{\[}}[[KLO]]:{{[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ule_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp ule i64 %val, 64
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_sgt_neg1_i64:
> +; GCN: v_cmp_lt_i64_e32 vcc, -1, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_sgt_neg1_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp sgt i64 %val, -1
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_sge_neg2_i64:
> +; GCN: v_cmp_lt_i64_e32 vcc, -3, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_sge_neg2_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp sge i64 %val, -2
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_slt_neg16_i64:
> +; GCN: v_cmp_gt_i64_e32 vcc, -16, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_slt_neg16_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp slt i64 %val, -16
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_sle_5_i64:
> +; GCN: v_cmp_gt_i64_e32 vcc, 6, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_sle_5_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load i64, i64 addrspace(1)* %gep.in
> + %cmp = icmp sle i64 %val, 5
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; --------------------------------------------------------------------------------
> +; f32 compares
> +; --------------------------------------------------------------------------------
> +
> +
> +; GCN-LABEL: {{^}}commute_oeq_2.0_f32:
> +; GCN: v_cmp_eq_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_oeq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp oeq float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +
> +; GCN-LABEL: {{^}}commute_ogt_2.0_f32:
> +; GCN: v_cmp_lt_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ogt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ogt float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_oge_2.0_f32:
> +; GCN: v_cmp_le_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_oge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp oge float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_olt_2.0_f32:
> +; GCN: v_cmp_gt_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_olt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp olt float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ole_2.0_f32:
> +; GCN: v_cmp_ge_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ole_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ole float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_one_2.0_f32:
> +; GCN: v_cmp_lg_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_one_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp one float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ord_2.0_f32:
> +; GCN: v_cmp_o_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]]
> +define void @commute_ord_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ord float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
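
The ord test (and the uno one below) is checking a slightly different
fold from the rest: since 2.0 can never be NaN, "fcmp ord %val, 2.0"
reduces to "%val is not NaN", which is why the CHECK line expects the
value compared against itself rather than against 2.0. A tiny
standalone illustration of that equivalence (plain C++, not LLVM code):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const float vals[] = {0.0f, 2.0f, -1.5f,
                        std::numeric_limits<float>::quiet_NaN()};
  for (float x : vals) {
    bool ordWith2 = !std::isnan(x) && !std::isnan(2.0f); // fcmp ord x, 2.0
    bool selfOrd  = (x == x);          // what v_cmp_o x, x effectively tests
    assert(ordWith2 == selfOrd);
  }
  return 0;
}
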
> +
> +; GCN-LABEL: {{^}}commute_ueq_2.0_f32:
> +; GCN: v_cmp_nlg_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ueq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ueq float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ugt_2.0_f32:
> +; GCN: v_cmp_nge_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ugt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ugt float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uge_2.0_f32:
> +; GCN: v_cmp_ngt_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_uge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp uge float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ult_2.0_f32:
> +; GCN: v_cmp_nle_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ult_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ult float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ule_2.0_f32:
> +; GCN: v_cmp_nlt_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_ule_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp ule float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_une_2.0_f32:
> +; GCN: v_cmp_neq_f32_e32 vcc, 2.0, v{{[0-9]+}}
> +define void @commute_une_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp une float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uno_2.0_f32:
> +; GCN: v_cmp_u_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]]
> +define void @commute_uno_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load float, float addrspace(1)* %gep.in
> + %cmp = fcmp uno float %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; --------------------------------------------------------------------------------
> +; f64 compares
> +; --------------------------------------------------------------------------------
> +
> +
> +; GCN-LABEL: {{^}}commute_oeq_2.0_f64:
> +; GCN: v_cmp_eq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_oeq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp oeq double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +
> +; GCN-LABEL: {{^}}commute_ogt_2.0_f64:
> +; GCN: v_cmp_lt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ogt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ogt double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_oge_2.0_f64:
> +; GCN: v_cmp_le_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_oge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp oge double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_olt_2.0_f64:
> +; GCN: v_cmp_gt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_olt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp olt double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ole_2.0_f64:
> +; GCN: v_cmp_ge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ole_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ole double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_one_2.0_f64:
> +; GCN: v_cmp_lg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_one_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp one double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ord_2.0_f64:
> +; GCN: v_cmp_o_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]]
> +define void @commute_ord_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ord double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ueq_2.0_f64:
> +; GCN: v_cmp_nlg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ueq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ueq double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ugt_2.0_f64:
> +; GCN: v_cmp_nge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ugt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ugt double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uge_2.0_f64:
> +; GCN: v_cmp_ngt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_uge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp uge double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ult_2.0_f64:
> +; GCN: v_cmp_nle_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ult_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ult double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_ule_2.0_f64:
> +; GCN: v_cmp_nlt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_ule_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp ule double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_une_2.0_f64:
> +; GCN: v_cmp_neq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}}
> +define void @commute_une_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp une double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}commute_uno_2.0_f64:
> +; GCN: v_cmp_u_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]]
> +define void @commute_uno_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 {
> + %tid = call i32 @llvm.r600.read.tidig.x() #0
> + %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
> + %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
> + %val = load double, double addrspace(1)* %gep.in
> + %cmp = fcmp uno double %val, 2.0
> + %ext = sext i1 %cmp to i32
> + store i32 %ext, i32 addrspace(1)* %gep.out
> + ret void
> +}
> +
> +attributes #0 = { nounwind readnone }
> +attributes #1 = { nounwind }
> diff --git a/test/CodeGen/R600/fceil64.ll b/test/CodeGen/R600/fceil64.ll
> index e3244fa..e8c34f0 100644
> --- a/test/CodeGen/R600/fceil64.ll
> +++ b/test/CodeGen/R600/fceil64.ll
> @@ -17,13 +17,13 @@ declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
> ; SI: s_lshr_b64
> ; SI: s_not_b64
> ; SI: s_and_b64
> -; SI: cmp_lt_i32
> +; SI: cmp_gt_i32
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> -; SI: cmp_gt_i32
> +; SI: cmp_lt_i32
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> -; SI-DAG: v_cmp_gt_f64
> +; SI-DAG: v_cmp_lt_f64
> ; SI-DAG: v_cmp_lg_f64
> ; SI: s_and_b64
> ; SI: v_cndmask_b32
> diff --git a/test/CodeGen/R600/ffloor.f64.ll b/test/CodeGen/R600/ffloor.f64.ll
> index 745ad3b..f3cbce1 100644
> --- a/test/CodeGen/R600/ffloor.f64.ll
> +++ b/test/CodeGen/R600/ffloor.f64.ll
> @@ -18,13 +18,13 @@ declare <16 x double> @llvm.floor.v16f64(<16 x double>) nounwind readnone
> ; SI: s_lshr_b64
> ; SI: s_not_b64
> ; SI: s_and_b64
> -; SI: cmp_lt_i32
> +; SI: cmp_gt_i32
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> -; SI: cmp_gt_i32
> +; SI: cmp_lt_i32
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> -; SI-DAG: v_cmp_lt_f64
> +; SI-DAG: v_cmp_gt_f64
> ; SI-DAG: v_cmp_lg_f64
> ; SI-DAG: s_and_b64
> ; SI-DAG: v_cndmask_b32
> diff --git a/test/CodeGen/R600/ftrunc.f64.ll b/test/CodeGen/R600/ftrunc.f64.ll
> index dd51f64..4ea84a7 100644
> --- a/test/CodeGen/R600/ftrunc.f64.ll
> +++ b/test/CodeGen/R600/ftrunc.f64.ll
> @@ -27,12 +27,12 @@ define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
> ; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
> ; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01
> ; SI: s_lshr_b64
> -; SI: cmp_lt_i32
> +; SI: cmp_gt_i32
> ; SI: s_not_b64
> ; SI: s_and_b64
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> -; SI: cmp_gt_i32
> +; SI: cmp_lt_i32
> ; SI: cndmask_b32
> ; SI: cndmask_b32
> ; SI: s_endpgm
> diff --git a/test/CodeGen/R600/i1-copy-phi.ll b/test/CodeGen/R600/i1-copy-phi.ll
> index 430466e..105cd06 100644
> --- a/test/CodeGen/R600/i1-copy-phi.ll
> +++ b/test/CodeGen/R600/i1-copy-phi.ll
> @@ -6,7 +6,7 @@
> ; SI: s_and_saveexec_b64
> ; SI: s_xor_b64
> ; SI: v_mov_b32_e32 [[REG]], -1{{$}}
> -; SI: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[REG]], 0
> +; SI: v_cmp_ne_i32_e32 vcc, 0, [[REG]]
> ; SI: s_and_saveexec_b64
> ; SI: s_xor_b64
> ; SI: s_endpgm
> diff --git a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
> index 48a4af1..bcb7f87 100644
> --- a/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
> +++ b/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
> @@ -78,7 +78,7 @@ define void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b,
> }
>
> ; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc:
> -; SI: v_cmp_eq_i32_e64 vcc, s{{[0-9]+}}, 0
> +; SI: v_cmp_eq_i32_e64 vcc, 0, s{{[0-9]+}}
> ; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
> define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
> %cmp = icmp eq i32 %i, 0
> @@ -110,8 +110,8 @@ define void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, fl
> ; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}
> ; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
>
> -; SI-DAG: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
> -; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0
> +; SI-DAG: v_cmp_eq_i32_e32 [[CMP0:vcc]], 0, v{{[0-9]+}}
> +; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, s{{[0-9]+}}
> ; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
> ; SI: v_div_fmas_f32 {{v[0-9]+}}, [[A]], [[B]], [[C]]
> ; SI: s_endpgm
> @@ -136,17 +136,17 @@ define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, flo
> }
>
> ; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
> -; SI: v_cmp_eq_i32_e64 [[CMPTID:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
> -; SI: s_and_saveexec_b64 [[CMPTID]], [[CMPTID]]
> -; SI: s_xor_b64 [[CMPTID]], exec, [[CMPTID]]
> +; SI: v_cmp_eq_i32_e32 vcc, 0, v{{[0-9]+}}
> +; SI: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc
> +; SI: s_xor_b64 [[SAVE]], exec, [[SAVE]]
>
> ; SI: buffer_load_dword [[LOAD:v[0-9]+]]
> -; SI: v_cmp_ne_i32_e64 [[CMPLOAD:s\[[0-9]+:[0-9]+\]]], [[LOAD]], 0
> -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, [[CMPLOAD]]
> +; SI: v_cmp_ne_i32_e32 vcc, 0, [[LOAD]]
> +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
>
>
> ; SI: BB9_2:
> -; SI: s_or_b64 exec, exec, [[CMPTID]]
> +; SI: s_or_b64 exec, exec, [[SAVE]]
> ; SI: v_cmp_ne_i32_e32 vcc, 0, v0
> ; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
> ; SI: buffer_store_dword
> diff --git a/test/CodeGen/R600/llvm.round.f64.ll b/test/CodeGen/R600/llvm.round.f64.ll
> index 7d082a2..3d0f57e 100644
> --- a/test/CodeGen/R600/llvm.round.f64.ll
> +++ b/test/CodeGen/R600/llvm.round.f64.ll
> @@ -21,7 +21,7 @@ define void @round_f64(double addrspace(1)* %out, double %x) #0 {
> ; SI-DAG: v_cmp_eq_i32
>
> ; SI-DAG: s_mov_b32 [[BFIMASK:s[0-9]+]], 0x7fffffff
> -; SI-DAG: v_cmp_lt_i32_e64
> +; SI-DAG: v_cmp_gt_i32_e64
> ; SI-DAG: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[BFIMASK]]
>
> ; SI-DAG: v_cmp_gt_i32_e64
> diff --git a/test/CodeGen/R600/llvm.round.ll b/test/CodeGen/R600/llvm.round.ll
> index 8d1cfb6..f5f124d 100644
> --- a/test/CodeGen/R600/llvm.round.ll
> +++ b/test/CodeGen/R600/llvm.round.ll
> @@ -9,7 +9,7 @@
> ; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]]
> ; SI: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]]
> ; SI: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]]
> -; SI: v_cmp_ge_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SUB]]|, 0.5
> +; SI: v_cmp_le_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0.5, |[[SUB]]|
> ; SI: v_cndmask_b32_e64 [[SEL:v[0-9]+]], 0, [[VX]], [[CMP]]
> ; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]]
> ; SI: buffer_store_dword [[RESULT]]
> diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
> index 1337adb..1c04090 100644
> --- a/test/CodeGen/R600/or.ll
> +++ b/test/CodeGen/R600/or.ll
> @@ -155,7 +155,7 @@ define void @trunc_i64_or_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
> ; FUNC-LABEL: {{^}}or_i1:
> ; EG: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
>
> -; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
> +; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}]
> define void @or_i1(i32 addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
> %a = load float, float addrspace(1)* %in0
> %b = load float, float addrspace(1)* %in1
> diff --git a/test/CodeGen/R600/setcc-opt.ll b/test/CodeGen/R600/setcc-opt.ll
> index 0219cdb..4e6a10d 100644
> --- a/test/CodeGen/R600/setcc-opt.ll
> +++ b/test/CodeGen/R600/setcc-opt.ll
> @@ -40,7 +40,7 @@ define void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
> ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
> ; GCN: v_cmp_eq_i32_e32 vcc,
> ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
> -; GCN-NEXT: v_cmp_eq_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}}
> +; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
> ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
> ; GCN-NEXT: buffer_store_byte [[TMP]]
> ; GCN-NEXT: s_endpgm
> @@ -56,7 +56,7 @@ define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
> ; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
> ; GCN: v_cmp_ne_i32_e32 vcc,
> ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
> -; GCN-NEXT: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}}
> +; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}}
> ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
> ; GCN-NEXT: buffer_store_byte [[TMP]]
> ; GCN-NEXT: s_endpgm
> @@ -129,8 +129,8 @@ define void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
> ; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
> ; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
> ; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
> -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[VB]], 2{{$}}
> -; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
> +; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}}
> +; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
> ; GCN: buffer_store_byte
> ; GCN: s_endpgm
> define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
> @@ -144,7 +144,7 @@ define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
> ; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
> ; GCN: buffer_load_ubyte [[B:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44
> ; GCN: v_mov_b32_e32 [[K255:v[0-9]+]], 0xff{{$}}
> -; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K255]]
> +; GCN: v_cmp_ne_i32_e32 vcc, [[K255]], [[B]]
> ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
> ; GCN-NEXT: buffer_store_byte [[RESULT]]
> ; GCN: s_endpgm
> @@ -157,8 +157,8 @@ define void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
>
> ; FUNC-LABEL: {{^}}cmp_sext_k_neg1:
> ; GCN: buffer_load_sbyte [[B:v[0-9]+]]
> -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}}
> -; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
> +; GCN: v_cmp_ne_i32_e32 vcc, -1, [[B]]{{$}}
> +; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
> ; GCN-NEXT: buffer_store_byte [[RESULT]]
> ; GCN: s_endpgm
> define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind {
> @@ -171,7 +171,7 @@ define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nou
>
> ; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_sext_arg:
> ; GCN: s_load_dword [[B:s[0-9]+]]
> -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}}
> +; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -1, [[B]]
> ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]]
> ; GCN-NEXT: buffer_store_byte [[RESULT]]
> ; GCN: s_endpgm
> @@ -189,7 +189,7 @@ define void @cmp_sext_k_neg1_i8_sext_arg(i1 addrspace(1)* %out, i8 signext %b) n
> ; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_arg:
> ; GCN-DAG: buffer_load_ubyte [[B:v[0-9]+]]
> ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0xff{{$}}
> -; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K]]{{$}}
> +; GCN: v_cmp_ne_i32_e32 vcc, [[K]], [[B]]{{$}}
> ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
> ; GCN-NEXT: buffer_store_byte [[RESULT]]
> ; GCN: s_endpgm
> diff --git a/test/CodeGen/R600/sgpr-control-flow.ll b/test/CodeGen/R600/sgpr-control-flow.ll
> index fae7cd2..38289ce 100644
> --- a/test/CodeGen/R600/sgpr-control-flow.ll
> +++ b/test/CodeGen/R600/sgpr-control-flow.ll
> @@ -64,15 +64,15 @@ endif:
>
> ; SI-LABEL: {{^}}sgpr_if_else_valu_cmp_phi_br:
> ; SI: buffer_load_dword [[AVAL:v[0-9]+]]
> -; SI: v_cmp_lt_i32_e64 [[CMP_IF:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0
> +; SI: v_cmp_gt_i32_e32 [[CMP_IF:vcc]], 0, [[AVAL]]
> ; SI: v_cndmask_b32_e64 [[V_CMP:v[0-9]+]], 0, -1, [[CMP_IF]]
>
> ; SI: BB2_1:
> ; SI: buffer_load_dword [[AVAL:v[0-9]+]]
> -; SI: v_cmp_eq_i32_e64 [[CMP_ELSE:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0
> +; SI: v_cmp_eq_i32_e32 [[CMP_ELSE:vcc]], 0, [[AVAL]]
> ; SI: v_cndmask_b32_e64 [[V_CMP]], 0, -1, [[CMP_ELSE]]
>
> -; SI: v_cmp_ne_i32_e64 [[CMP_CMP:s\[[0-9]+:[0-9]+\]]], [[V_CMP]], 0
> +; SI: v_cmp_ne_i32_e32 [[CMP_CMP:vcc]], 0, [[V_CMP]]
> ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP_CMP]]
> ; SI: buffer_store_dword [[RESULT]]
> define void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) {
> diff --git a/test/CodeGen/R600/trunc-cmp-constant.ll b/test/CodeGen/R600/trunc-cmp-constant.ll
> index 21dfade..dac7472 100644
> --- a/test/CodeGen/R600/trunc-cmp-constant.ll
> +++ b/test/CodeGen/R600/trunc-cmp-constant.ll
> @@ -4,8 +4,8 @@
> ; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_eq_0:
> ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
> ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
> -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
> -; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1{{$}}
> +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
> +; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc, -1{{$}}
> ; SI: v_cndmask_b32_e64
> ; SI: buffer_store_byte
> define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
> @@ -20,8 +20,8 @@ define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspa
> ; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_0:
> ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
> ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
> -; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
> -; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1
> +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
> +; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1
> ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
> ; SI-NEXT: buffer_store_byte [[RESULT]]
> define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
> @@ -117,8 +117,8 @@ define void @sextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspa
> ; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_1:
> ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]]
> ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]]
> -; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}}
> -; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1
> +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
> +; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1
> ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]]
> ; SI-NEXT: buffer_store_byte [[RESULT]]
> define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind {
> @@ -157,7 +157,7 @@ define void @zextload_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i1 addr
>
> ; FUNC-LABEL: {{^}}masked_load_i1_to_i32_trunc_cmp_ne_neg1:
> ; SI: buffer_load_sbyte [[LOAD:v[0-9]+]]
> -; SI: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[LOAD]], -1{{$}}
> +; SI: v_cmp_ne_i32_e32 vcc, -1, [[LOAD]]{{$}}
> ; SI-NEXT: v_cndmask_b32_e64
> ; SI-NEXT: buffer_store_byte
> define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind {
> diff --git a/test/CodeGen/R600/trunc.ll b/test/CodeGen/R600/trunc.ll
> index 5580bd3..bf690ca 100644
> --- a/test/CodeGen/R600/trunc.ll
> +++ b/test/CodeGen/R600/trunc.ll
> @@ -73,8 +73,8 @@ define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
> ; SI-LABEL: {{^}}s_trunc_i64_to_i1:
> ; SI: s_load_dwordx2 s{{\[}}[[SLO:[0-9]+]]:{{[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0xb
> ; SI: v_and_b32_e64 [[MASKED:v[0-9]+]], 1, s[[SLO]]
> -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1
> -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]]
> +; SI: v_cmp_eq_i32_e32 vcc, 1, [[MASKED]]
> +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc
> define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
> %trunc = trunc i64 %x to i1
> %sel = select i1 %trunc, i32 63, i32 -12
> @@ -85,8 +85,8 @@ define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
> ; SI-LABEL: {{^}}v_trunc_i64_to_i1:
> ; SI: buffer_load_dwordx2 v{{\[}}[[VLO:[0-9]+]]:{{[0-9]+\]}}
> ; SI: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]]
> -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1
> -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]]
> +; SI: v_cmp_eq_i32_e32 vcc, 1, [[MASKED]]
> +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc
> define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
> %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
> %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
> diff --git a/test/CodeGen/R600/valu-i1.ll b/test/CodeGen/R600/valu-i1.ll
> index ef4f3ef..7d0ebd1 100644
> --- a/test/CodeGen/R600/valu-i1.ll
> +++ b/test/CodeGen/R600/valu-i1.ll
> @@ -42,8 +42,8 @@ end:
> }
>
> ; SI-LABEL: @simple_test_v_if
> -; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
> -; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
> +; SI: v_cmp_ne_i32_e32 vcc, 0, v{{[0-9]+}}
> +; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
> ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
>
> ; SI: ; BB#1
> @@ -68,8 +68,8 @@ exit:
> }
>
> ; SI-LABEL: @simple_test_v_loop
> -; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0
> -; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
> +; SI: v_cmp_ne_i32_e32 vcc, 0, v{{[0-9]+}}
> +; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
> ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
> ; SI: s_cbranch_execz BB2_2
>
> @@ -111,8 +111,8 @@ exit:
> ; Branch to exit if uniformly not taken
> ; SI: ; BB#0:
> ; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
> -; SI: v_cmp_gt_i32_e64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]]
> -; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG]], [[OUTER_CMP_SREG]]
> +; SI: v_cmp_lt_i32_e32 vcc
> +; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc
> ; SI: s_xor_b64 [[OUTER_CMP_SREG]], exec, [[OUTER_CMP_SREG]]
> ; SI: s_cbranch_execz BB3_2
>
> @@ -125,8 +125,8 @@ exit:
> ; SI: BB3_3:
> ; SI: buffer_load_dword [[B:v[0-9]+]]
> ; SI: buffer_load_dword [[A:v[0-9]+]]
> -; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], [[A]], -1
> -; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_1:s\[[0-9]+:[0-9]+\]]], [[B]], -1
> +; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], -1, [[A]]
> +; SI-DAG: v_cmp_ne_i32_e32 [[NEG1_CHECK_1:vcc]], -1, [[B]]
> ; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
> ; SI: s_and_saveexec_b64 [[ORNEG1]], [[ORNEG1]]
> ; SI: s_xor_b64 [[ORNEG1]], exec, [[ORNEG1]]
> diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
> index ea78cca..089db59 100644
> --- a/test/CodeGen/R600/xor.ll
> +++ b/test/CodeGen/R600/xor.ll
> @@ -40,8 +40,8 @@ define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in
> ; FUNC-LABEL: {{^}}xor_i1:
> ; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
>
> -; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0
> -; SI-DAG: v_cmp_ge_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 1.0
> +; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 0, {{v[0-9]+}}
> +; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1.0, {{v[0-9]+}}
> ; SI: s_xor_b64 [[XOR:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
> ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, [[XOR]]
> ; SI: buffer_store_dword [[RESULT]]
> --
> 2.2.1
>