PATCH: R600/SI: Do abs/neg folding with ComplexPatterns

Matt Arsenault Matthew.Arsenault at amd.com
Thu Jul 24 11:20:10 PDT 2014


On 07/24/2014 10:47 AM, Tom Stellard wrote:
>
> 0001-R600-SI-Fix-incorrect-commute-operation-in-shrink-in.patch
>
>
>  From 36d87c5f94e75192c2f7af1c9ab090e4bc942a76 Mon Sep 17 00:00:00 2001
> From: Tom Stellard<thomas.stellard at amd.com>
> Date: Mon, 21 Jul 2014 15:49:53 -0400
> Subject: [PATCH 1/4] R600/SI: Fix incorrect commute operation in shrink
>   instructions pass
>
> We were commuting the instruction by still shrinking it using the
> original opcode.
>
> NOTE: This is a candidate for the 3.5 branch.
> ---
>   lib/Target/R600/SIInstrInfo.cpp          |  4 ++++
>   lib/Target/R600/SIInstrInfo.h            |  4 ++++
>   lib/Target/R600/SIShrinkInstructions.cpp | 11 ++++++++---
>   test/CodeGen/R600/vop-shrink.ll          | 34 ++++++++++++++++++++++++++++++++
>   4 files changed, 50 insertions(+), 3 deletions(-)
>   create mode 100644 test/CodeGen/R600/vop-shrink.ll
>
> diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
> index 8c3af77..51f4532 100644
> --- a/lib/Target/R600/SIInstrInfo.cpp
> +++ b/lib/Target/R600/SIInstrInfo.cpp
> @@ -576,6 +576,10 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
>     return RI.regClassCanUseImmediate(OpInfo.RegClass);
>   }
>   
> +bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
> +  return AMDGPU::getVOPe32(Opcode) != -1;
> +}
> +
>   bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
>                                       StringRef &ErrInfo) const {
>     uint16_t Opcode = MI->getOpcode();
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index 13ab484..4687539 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -109,6 +109,10 @@ public:
>     bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
>                            const MachineOperand &MO) const;
>   
> +  /// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
> +  /// This function will return false if you pass it a 32-bit instruction.
> +  bool hasVALU32BitEncoding(unsigned Opcode) const;
> +
>     bool verifyInstruction(const MachineInstr *MI,
>                            StringRef &ErrInfo) const override;
>   
> diff --git a/lib/Target/R600/SIShrinkInstructions.cpp b/lib/Target/R600/SIShrinkInstructions.cpp
> index 362a5c1..745c4b6 100644
> --- a/lib/Target/R600/SIShrinkInstructions.cpp
> +++ b/lib/Target/R600/SIShrinkInstructions.cpp
> @@ -125,9 +125,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
>         Next = std::next(I);
>         MachineInstr &MI = *I;
>   
> -      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
> -
> -      if (Op32 == -1)
> +      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
>           continue;
>   
>         if (!canShrink(MI, TII, TRI, MRI)) {
> @@ -138,6 +136,13 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
>             continue;
>         }
>   
> +      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
> +
> +      // Op32 could be -1 here if we started with an instruction that had a
> +      // a 32-bit encoding and then commuted it to an instruction that did not.
> +      if (Op32 == -1)
> +        continue;
> +
>         if (TII->isVOPC(Op32)) {
>           unsigned DstReg = MI.getOperand(0).getReg();
>           if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
> diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
> new file mode 100644
> index 0000000..86e23d2
> --- /dev/null
> +++ b/test/CodeGen/R600/vop-shrink.ll
> @@ -0,0 +1,34 @@
> +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
> +; XXX: Enable this test once we are selecting 64-bit instructions
> +
> +; XFAIL: *
Why is this disabled? This comment doesn't make it clear

> +
> +; Test that we correctly commute a sub instruction
> +; FUNC-LABEL: @sub_rev
> +; SI-NOT: V_SUB_I32_e32 v{{[0-9]+}}, s
> +; SI: V_SUBREV_I32_e32 v{{[0-9]+}}, s
> +define void @sub_rev(i32 addrspace(1)* %out, <4 x i32> %sgpr, i32 %cond) {
> +entry:
> +  %vgpr = call i32 @llvm.r600.read.tidig.x() #0
> +  %0 = icmp eq i32 %cond, 0
> +  br i1 %0, label %if, label %else
> +
> +if:
> +  %1 = getelementptr i32 addrspace(1)* %out, i32 1
> +  %2 = extractelement <4 x i32> %sgpr, i32 1
> +  store i32 %2, i32 addrspace(1)* %out
> +  br label %endif
> +
> +else:
> +  %3 = extractelement <4 x i32> %sgpr, i32 2
> +  %4 = sub i32 %vgpr, %3
> +  store i32 %4, i32 addrspace(1)* %out
> +  br label %endif
> +
> +endif:
> +  ret void
> +}
> +
> +declare i32 @llvm.r600.read.tidig.x() #0
> +
> +attributes #0 = { readnone }
> -- 1.8.1.5
For this test and any others in the future, can you run instnamer on 
them? It makes it easier to make similar copies of tests



>
> 0002-R600-SI-Fold-immediates-when-shrinking-instructions.patch
>
>
>  From 889076055c9d5ea7cc2369cf1a9a3d0cf34c2f03 Mon Sep 17 00:00:00 2001
> From: Tom Stellard<thomas.stellard at amd.com>
> Date: Tue, 22 Jul 2014 10:18:05 -0400
> Subject: [PATCH 2/4] R600/SI: Fold immediates when shrinking instructions
>
> This will prevent us from using extra MOV instructions once we prefer
> selecting 64-bit instructions.
> ---
>   lib/Target/R600/SIInstrInfo.cpp          |  2 +-
>   lib/Target/R600/SIInstrInfo.h            |  3 +-
>   lib/Target/R600/SIShrinkInstructions.cpp | 85 +++++++++++++++++++++++++++++---
>   test/CodeGen/R600/vop-shrink.ll          | 16 ++++++
>   4 files changed, 95 insertions(+), 11 deletions(-)
>
> diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
> index 51f4532..06529a2 100644
> --- a/lib/Target/R600/SIInstrInfo.cpp
> +++ b/lib/Target/R600/SIInstrInfo.cpp
> @@ -1644,7 +1644,7 @@ void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
>       Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
>   }
>   
> -const MachineOperand *SIInstrInfo::getNamedOperand(const MachineInstr& MI,
> +MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
>                                                      unsigned OperandName) const {
>     int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
>     if (Idx == -1)
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index 4687539..3689a36 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -181,8 +181,7 @@ public:
>   
>     /// \brief Returns the operand named \p Op.  If \p MI does not have an
>     /// operand named \c Op, this function returns nullptr.
> -  const MachineOperand *getNamedOperand(const MachineInstr& MI,
> -                                        unsigned OperandName) const;
> +  MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const;
>   };
>   
>   namespace AMDGPU {
> diff --git a/lib/Target/R600/SIShrinkInstructions.cpp b/lib/Target/R600/SIShrinkInstructions.cpp
> index 745c4b6..8f1526d 100644
> --- a/lib/Target/R600/SIShrinkInstructions.cpp
> +++ b/lib/Target/R600/SIShrinkInstructions.cpp
> @@ -15,6 +15,7 @@
>   #include "llvm/CodeGen/MachineFunctionPass.h"
>   #include "llvm/CodeGen/MachineInstrBuilder.h"
>   #include "llvm/CodeGen/MachineRegisterInfo.h"
> +#include "llvm/IR/Constants.h"
>   #include "llvm/IR/LLVMContext.h"
>   #include "llvm/IR/Function.h"
>   #include "llvm/Support/Debug.h"
> @@ -24,6 +25,8 @@
>   
>   STATISTIC(NumInstructionsShrunk,
>             "Number of 64-bit instruction reduced to 32-bit.");
> +STATISTIC(NumLiteralConstantsFolded,
> +          "Number of literal constants fold into 32-bit instructions.");
Grammar: folded
>   
>   namespace llvm {
>     void initializeSIShrinkInstructionsPass(PassRegistry&);
> @@ -109,6 +112,71 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
>     return !Clamp || Clamp->getImm() == 0;
>   }
>   
> +/// \brief This function checks \p MI for operands defined by a move immediate
> +/// instruction and then folds the literal constant into the instruction if it
> +/// can.  This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
> +/// and will only fold literal constants if we are still in SSA.
> +static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
> +                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
> +
> +  if (!MRI.isSSA())
> +    return;
> +
> +  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
> +         TII->isVOPC(MI.getOpcode()));
> +
> +  const SIRegisterInfo &TRI = TII->getRegisterInfo();
> +  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
> +
> +  // Only one literal constant is allowed per instruction, so if src0 is a
> +  // literal constant then we can't do any folding.
> +  if (Src0->isImm() && TII->isLiteralConstant(*Src0))
> +    return;
> +
> +
> +  // Literal constants and SGPRs can only be used in Src0, so if Src0 is an
> +  // SGPR, we cannot commute the instruction, so we can't fold any literal
> +  // constants.
> +  if (Src0->isReg() && !isVGPR(Src0, TRI, MRI))
> +    return;
> +
> +  // Try to fold Src0
> +  if (Src0->isReg()) {
> +    unsigned Reg = Src0->getReg();
> +    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
> +    if (Def && Def->isMoveImmediate()) {
> +      MachineOperand &MovSrc = Def->getOperand(1);
> +      bool ConstantFolded = false;
> +
> +      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
> +        Src0->ChangeToImmediate(MovSrc.getImm());
> +        ConstantFolded = true;
> +      } else if (MovSrc.isFPImm()) {
> +        const APFloat &APF = MovSrc.getFPImm()->getValueAPF();
> +        if (&APF.getSemantics() == &APFloat::IEEEsingle) {
> +          MRI.removeRegOperandFromUseList(Src0);
> +          Src0->ChangeToImmediate(APF.bitcastToAPInt().getZExtValue());
> +          ConstantFolded = true;
> +        }
> +      }
> +      if (ConstantFolded) {
> +        for (MachineOperand &Use : MRI.use_operands(Reg))
> +          Use.getParent()->dump();
> +        if (MRI.use_empty(Reg))
> +          Def->eraseFromParent();
> +        ++NumLiteralConstantsFolded;
> +        return;
> +      }
> +    }
> +  }
> +
> +  // We have failed to fold src0, so commute the instruction and try again.
> +  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
> +    foldImmediates(MI, TII, MRI, false);
> +
> +  return;
Unnecessary return

> +}
> +
>   bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
>     MachineRegisterInfo &MRI = MF.getRegInfo();
>     const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
> @@ -169,25 +237,26 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
>         // We can shrink this instruction
>         DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << "\n";);
>   
> -      MachineInstrBuilder MIB =
> +      MachineInstrBuilder Inst32 =
>             BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
>   
>         // dst
> -      MIB.addOperand(MI.getOperand(0));
> +      Inst32.addOperand(MI.getOperand(0));
>   
> -      MIB.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
> +      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
>   
>         const MachineOperand *Src1 =
>             TII->getNamedOperand(MI, AMDGPU::OpName::src1);
>         if (Src1)
> -        MIB.addOperand(*Src1);
> +        Inst32.addOperand(*Src1);
>   
> -      for (const MachineOperand &MO : MI.implicit_operands())
> -        MIB.addOperand(MO);
> -
> -      DEBUG(dbgs() << "e32 MI = "; MI.dump(); dbgs() << "\n";);
>         ++NumInstructionsShrunk;
>         MI.eraseFromParent();
> +
> +      foldImmediates(*Inst32, TII, MRI);
> +      DEBUG(dbgs() << "e32 MI = " << *Inst32 << "\n");
Should use '\n' for printing a single character

> +
> +
>       }
>     }
>     return false;
> diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
> index 86e23d2..bb93ec4 100644
> --- a/test/CodeGen/R600/vop-shrink.ll
> +++ b/test/CodeGen/R600/vop-shrink.ll
> @@ -29,6 +29,22 @@ endif:
>     ret void
>   }
>   
> +; Test that we fold an immediate that was illegal for a 64-bit op into the
> +; 32-bit op when we shrink it.
> +
> +; FUNC-LABEL: @add_fold
> +; SI: V_ADD_F32_e32 v{{[0-9]+}}, 0x44800000
> +define void @add_fold(float addrspace(1)* %out) {
> +entry:
> +  %0 = call i32 @llvm.r600.read.tidig.x()
> +  %1 = uitofp i32 %0 to float
> +  %2 = fadd float %1, 1024.0
> +  store float %2, float addrspace(1)* %out
> +  ret void
> +}
> +
> +
>   declare i32 @llvm.r600.read.tidig.x() #0
>   
>   attributes #0 = { readnone }
> +
> -- 1.8.1.5
>
> 0003-TableGen-Allow-AddedComplexity-values-to-be-negative.patch
>
>
>  From 36338e5ef870201f2e37e9f21bf94575d89f0450 Mon Sep 17 00:00:00 2001
> From: Tom Stellard<thomas.stellard at amd.com>
> Date: Mon, 30 Jun 2014 13:03:17 -0400
> Subject: [PATCH 3/4] TableGen: Allow AddedComplexity values to be negative
>
> This is useful for cases when stand-alone patterns are preferred to the
> patterns included in the instruction definitions.  Instead of requiring
> that stand-alone patterns set a larger AddedComplexity value, which
> can be confusing to new developers, this allows us to reduce the
> complexity of the included patterns to achieve the same result.
>
> There will be test cases for this added to the R600 backend in a
> future commit.
> ---
>   utils/TableGen/CodeGenDAGPatterns.cpp | 2 +-
>   utils/TableGen/CodeGenDAGPatterns.h   | 8 ++++----
>   utils/TableGen/DAGISelEmitter.cpp     | 4 ++--
>   3 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/utils/TableGen/CodeGenDAGPatterns.cpp b/utils/TableGen/CodeGenDAGPatterns.cpp
> index 2602bbc..d195ba8 100644
> --- a/utils/TableGen/CodeGenDAGPatterns.cpp
> +++ b/utils/TableGen/CodeGenDAGPatterns.cpp
> @@ -771,7 +771,7 @@ static unsigned getPatternSize(const TreePatternNode *P,
>   
>   /// Compute the complexity metric for the input pattern.  This roughly
>   /// corresponds to the number of nodes that are covered.
> -unsigned PatternToMatch::
> +int PatternToMatch::
>   getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
>     return getPatternSize(getSrcPattern(), CGP) + getAddedComplexity();
>   }
> diff --git a/utils/TableGen/CodeGenDAGPatterns.h b/utils/TableGen/CodeGenDAGPatterns.h
> index fb30cdd..ef6c787 100644
> --- a/utils/TableGen/CodeGenDAGPatterns.h
> +++ b/utils/TableGen/CodeGenDAGPatterns.h
> @@ -667,7 +667,7 @@ public:
>     PatternToMatch(Record *srcrecord, ListInit *preds,
>                    TreePatternNode *src, TreePatternNode *dst,
>                    const std::vector<Record*> &dstregs,
> -                 unsigned complexity, unsigned uid)
> +                 int complexity, unsigned uid)
>       : SrcRecord(srcrecord), Predicates(preds), SrcPattern(src), DstPattern(dst),
>         Dstregs(dstregs), AddedComplexity(complexity), ID(uid) {}
>   
> @@ -676,7 +676,7 @@ public:
>     TreePatternNode *SrcPattern;  // Source pattern to match.
>     TreePatternNode *DstPattern;  // Resulting pattern.
>     std::vector<Record*> Dstregs; // Physical register defs being matched.
> -  unsigned         AddedComplexity; // Add to matching pattern complexity.
> +  int              AddedComplexity; // Add to matching pattern complexity.
>     unsigned         ID;          // Unique ID for the record.
>   
>     Record          *getSrcRecord()  const { return SrcRecord; }
> @@ -684,13 +684,13 @@ public:
>     TreePatternNode *getSrcPattern() const { return SrcPattern; }
>     TreePatternNode *getDstPattern() const { return DstPattern; }
>     const std::vector<Record*> &getDstRegs() const { return Dstregs; }
> -  unsigned         getAddedComplexity() const { return AddedComplexity; }
> +  int         getAddedComplexity() const { return AddedComplexity; }
>   
>     std::string getPredicateCheck() const;
>   
>     /// Compute the complexity metric for the input pattern.  This roughly
>     /// corresponds to the number of nodes that are covered.
> -  unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
> +  int getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
>   };
>   
>   class CodeGenDAGPatterns {
> diff --git a/utils/TableGen/DAGISelEmitter.cpp b/utils/TableGen/DAGISelEmitter.cpp
> index 82682cd..e2e6ab1 100644
> --- a/utils/TableGen/DAGISelEmitter.cpp
> +++ b/utils/TableGen/DAGISelEmitter.cpp
> @@ -94,8 +94,8 @@ struct PatternSortingPredicate {
>       // Otherwise, if the patterns might both match, sort based on complexity,
>       // which means that we prefer to match patterns that cover more nodes in the
>       // input over nodes that cover fewer.
> -    unsigned LHSSize = LHS->getPatternComplexity(CGP);
> -    unsigned RHSSize = RHS->getPatternComplexity(CGP);
> +    int LHSSize = LHS->getPatternComplexity(CGP);
> +    int RHSSize = RHS->getPatternComplexity(CGP);
>       if (LHSSize > RHSSize) return true;   // LHS -> bigger -> less cost
>       if (LHSSize < RHSSize) return false;
>   
> -- 1.8.1.5
>
> 0004-R600-SI-Do-abs-neg-folding-with-ComplexPatterns.patch
>
>
>  From 3ca510153eda33ec8b28929e339697ea36af95f3 Mon Sep 17 00:00:00 2001
> From: Tom Stellard<thomas.stellard at amd.com>
> Date: Mon, 30 Jun 2014 13:02:59 -0400
> Subject: [PATCH 4/4] R600/SI: Do abs/neg folding with ComplexPatterns
>
> Abs/neg folding has moved out of foldOperands and into the instruction
> selection phase using complex patterns.  As a consequence of this
> change, we now prefer to select the 64-bit encoding for most
> instructions and the modifier operands have been dropped from
> integer VOP3 instructions.
> ---
>   lib/Target/R600/AMDGPUISelDAGToDAG.cpp           |  35 ++
>   lib/Target/R600/AMDGPUInstructions.td            |   8 +
>   lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp |  10 +
>   lib/Target/R600/SIISelLowering.cpp               |  29 +-
>   lib/Target/R600/SIInstrFormats.td                |   7 +
>   lib/Target/R600/SIInstrInfo.cpp                  |  52 +--
>   lib/Target/R600/SIInstrInfo.h                    |  10 +
>   lib/Target/R600/SIInstrInfo.td                   | 455 +++++++++++++++++------
>   lib/Target/R600/SIInstructions.td                | 382 ++++++++++---------
>   lib/Target/R600/SILowerI1Copies.cpp              |   4 -
>   lib/Target/R600/SIShrinkInstructions.cpp         |   2 +-
>   test/CodeGen/R600/fabs.ll                        |   3 +-
>   test/CodeGen/R600/fneg.ll                        |   3 +-
>   test/CodeGen/R600/fsub.ll                        |  12 +-
>   test/CodeGen/R600/mul_uint24.ll                  |   4 +-
>   test/CodeGen/R600/vop-shrink.ll                  |   2 -
>   16 files changed, 654 insertions(+), 364 deletions(-)
>
> diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
> index cc17b7e..090fd1d 100644
> --- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
> +++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
> @@ -96,6 +96,9 @@ private:
>                            SDValue &SOffset, SDValue &Offset, SDValue &Offen,
>                            SDValue &Idxen, SDValue &GLC, SDValue &SLC,
>                            SDValue &TFE) const;
> +  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
> +  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
> +                       SDValue &Clamp, SDValue &Omod) const;
>   
>     SDNode *SelectADD_SUB_I64(SDNode *N);
>     SDNode *SelectDIV_SCALE(SDNode *N);
> @@ -879,6 +882,38 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc,
>     return SelectMUBUFScratch(Addr, SRsrc, VAddr, SOffset, Offset);
>   }
>   
> +bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
> +                                        SDValue &SrcMods) const {
> +
> +  unsigned Mods = 0;
> +
> +  Src = In;
> +
> +  if (Src.getOpcode() == ISD::FNEG) {
> +    Mods |= SISrcMods::NEG;
> +    Src = Src.getOperand(0);
> +  }
> +
> +  if (Src.getOpcode() == ISD::FABS) {
> +    Mods |= SISrcMods::ABS;
> +    Src = Src.getOperand(0);
> +  }
> +
> +  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);
> +
> +  return true;
> +}
> +
> +bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
> +                                         SDValue &SrcMods, SDValue &Clamp,
> +                                         SDValue &Omod) const {
> +  // FIXME: Handle Clamp and Omod
> +  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
> +  Omod = CurDAG->getTargetConstant(0, MVT::i32);
> +
> +  return SelectVOP3Mods(In, Src, SrcMods);
> +}
> +
>   void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
>     const AMDGPUTargetLowering& Lowering =
>       *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
> diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
> index cd35603..4b86858 100644
> --- a/lib/Target/R600/AMDGPUInstructions.td
> +++ b/lib/Target/R600/AMDGPUInstructions.td
> @@ -323,6 +323,14 @@ def atomic_cmp_swap_64_local :
>            AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
>   }]>;
>   
> +//===----------------------------------------------------------------------===//
> +// Misc Pattern Fragments
> +//===----------------------------------------------------------------------===//
> +
> +def fmad : PatFrag <
> +  (ops node:$src0, node:$src1, node:$src2),
> +  (fadd (fmul node:$src0, node:$src1), node:$src2)
> +>;
>   
>   class Constants {
>   int TWO_PI = 0x40c90fdb;
> diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> index 78776c1..8cdf878 100644
> --- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> +++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> @@ -14,6 +14,7 @@
>   //===----------------------------------------------------------------------===//
>   
>   #include "AMDGPU.h"
> +#include "SIDefines.h"
>   #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
>   #include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
>   #include "MCTargetDesc/AMDGPUFixupKinds.h"
> @@ -84,6 +85,15 @@ MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
>   
>   bool SIMCCodeEmitter::isSrcOperand(const MCInstrDesc &Desc,
>                                      unsigned OpNo) const {
> +  // FIXME: We need a better way to figure out which operands can be immediate
> +  // values
> +  //
> +  // Some VOP* instructions like ADDC use VReg32 as the register class
> +  // for source 0, because they read VCC and can't take an SGPR as an
> +  // argument due to constant bus restrictions.
> +  if (OpNo == 1 && (Desc.TSFlags & (SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
> +                                    SIInstrFlags::VOPC)))
> +    return true;
>   
>     unsigned RegClass = Desc.OpInfo[OpNo].RegClass;
>     return (AMDGPU::SSrc_32RegClassID == RegClass) ||
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index 5a148a2..2a319e1 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -1592,39 +1592,22 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
>         continue;
>       if (!Operand.isMachineOpcode())
>         continue;
> -    if (Operand.getMachineOpcode() == AMDGPU::FNEG_SI) {
> -      Ops.pop_back();
> -      Ops.push_back(Operand.getOperand(0));
> -      InputModifiers[i] = 1;
> -      Promote2e64 = true;
> -      if (!DescE64)
> -        continue;
> -      Desc = DescE64;
> -      DescE64 = nullptr;
> -    }
> -    else if (Operand.getMachineOpcode() == AMDGPU::FABS_SI) {
> -      Ops.pop_back();
> -      Ops.push_back(Operand.getOperand(0));
> -      InputModifiers[i] = 2;
> -      Promote2e64 = true;
> -      if (!DescE64)
> -        continue;
> -      Desc = DescE64;
> -      DescE64 = nullptr;
> -    }
>     }
>   
>     if (Promote2e64) {
>       std::vector<SDValue> OldOps(Ops);
>       Ops.clear();
> +    bool HasModifiers = TII->hasModifiers(Desc->Opcode);
>       for (unsigned i = 0; i < OldOps.size(); ++i) {
>         // src_modifier
> -      Ops.push_back(DAG.getTargetConstant(InputModifiers[i], MVT::i32));
> +      if (HasModifiers)
> +        Ops.push_back(DAG.getTargetConstant(InputModifiers[i], MVT::i32));
>         Ops.push_back(OldOps[i]);
>       }
>       // Add the modifier flags while promoting
> -    for (unsigned i = 0; i < 2; ++i)
> -      Ops.push_back(DAG.getTargetConstant(0, MVT::i32));
> +    if (HasModifiers)
Should have braces for this if
> +      for (unsigned i = 0; i < 2; ++i)
> +        Ops.push_back(DAG.getTargetConstant(0, MVT::i32));
>     }
>   
>     // Add optional chain and glue
> diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td
> index 00e69dd..fc6174c 100644
> --- a/lib/Target/R600/SIInstrFormats.td
> +++ b/lib/Target/R600/SIInstrFormats.td
> @@ -56,9 +56,16 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
>     let mayStore = 0;
>     let hasSideEffects = 0;
>     let UseNamedOperandTable = 1;
> +  // Using complex patterns gives VOP3 patterns a very high complexity rating,
> +  // but standalone patterns are almost always preferred, so we need to adjust the
> +  // priority lower.  The goal is to use a high number to reduce complexity to
> +  // zero (or less than zero).
> +  let AddedComplexity = -1000;
> +
>     let VOP3 = 1;
>   
>     int Size = 8;
> +  let Uses = [EXEC];
>   }
>   
>   //===----------------------------------------------------------------------===//
> diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
> index 06529a2..61424c9 100644
> --- a/lib/Target/R600/SIInstrInfo.cpp
> +++ b/lib/Target/R600/SIInstrInfo.cpp
> @@ -404,12 +404,17 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
>         return nullptr;
>       }
>   
> -    // XXX: Commute VOP3 instructions with abs and neg set.
> -    if (isVOP3(MI->getOpcode()) &&
> -        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
> -                        AMDGPU::OpName::abs)).getImm() ||
> -         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
> -                        AMDGPU::OpName::neg)).getImm()))
> +    // XXX: Commute VOP3 instructions with abs and neg set .
> +    const MachineOperand *Abs, *Neg, *Src0Mods, *Src1Mods, *Src2Mods;
Can you declare these where they are defined instead?
> +    Abs = getNamedOperand(*MI, AMDGPU::OpName::abs);
> +    Neg = getNamedOperand(*MI, AMDGPU::OpName::neg);
> +    Src0Mods = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers);
> +    Src1Mods = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);
> +    Src2Mods = getNamedOperand(*MI, AMDGPU::OpName::src2_modifiers);
> +
> +    if ((Abs && Abs->getImm()) || (Neg && Neg->getImm()) ||
> +        (Src0Mods && Src0Mods->getImm()) || (Src1Mods && Src1Mods->getImm()) ||
> +        (Src2Mods && Src2Mods->getImm()))
>         return nullptr;
>   
>       unsigned Reg = MI->getOperand(1).getReg();
> @@ -580,6 +585,14 @@ bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
>     return AMDGPU::getVOPe32(Opcode) != -1;
>   }
>   
> +bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
> +  // The src0_modifier operand is present on all instructions
> +  // that have modifiers.
> +
> +  return AMDGPU::getNamedOperandIdx(Opcode,
> +                                    AMDGPU::OpName::src0_modifiers) != -1;
> +}
> +
>   bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
>                                       StringRef &ErrInfo) const {
>     uint16_t Opcode = MI->getOpcode();
> @@ -596,14 +609,20 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
>     }
>   
>     // Make sure the register classes are correct
> -  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
> +  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
>       switch (Desc.OpInfo[i].OperandType) {
>       case MCOI::OPERAND_REGISTER: {
>         int RegClass = Desc.OpInfo[i].RegClass;
>         if (!RI.regClassCanUseImmediate(RegClass) &&
>             (MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm())) {
> -        ErrInfo = "Expected register, but got immediate";
> -        return false;
> +        // Handle some special cases:
> +        // Src0 can of VOP1, VOP2, VOPC can be an immediate no matter what
> +        // the register class.
> +        if (i != Src0Idx || (!isVOP1(Opcode) && !isVOP2(Opcode) &&
> +                                  !isVOPC(Opcode))) {
> +          ErrInfo = "Expected register, but got immediate";
> +          return false;
> +        }
>         }
>       }
>         break;
> @@ -1311,17 +1330,9 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
>         // We are converting these to a BFE, so we need to add the missing
>         // operands for the size and offset.
>         unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
> -      Inst->addOperand(Inst->getOperand(1));
> -      Inst->getOperand(1).ChangeToImmediate(0);
> -      Inst->addOperand(MachineOperand::CreateImm(0));
> -      Inst->addOperand(MachineOperand::CreateImm(0));
>         Inst->addOperand(MachineOperand::CreateImm(0));
>         Inst->addOperand(MachineOperand::CreateImm(Size));
>   
> -      // XXX - Other pointless operands. There are 4, but it seems you only need
> -      // 3 to not hit an assertion later in MCInstLower.
> -      Inst->addOperand(MachineOperand::CreateImm(0));
> -      Inst->addOperand(MachineOperand::CreateImm(0));
>       } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
>         // The VALU version adds the second operand to the result, so insert an
>         // extra 0 operand.
> @@ -1340,16 +1351,9 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
>   
>         uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
>         uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
> -
>         Inst->RemoveOperand(2); // Remove old immediate.
> -      Inst->addOperand(Inst->getOperand(1));
> -      Inst->getOperand(1).ChangeToImmediate(0);
> -      Inst->addOperand(MachineOperand::CreateImm(0));
>         Inst->addOperand(MachineOperand::CreateImm(Offset));
> -      Inst->addOperand(MachineOperand::CreateImm(0));
>         Inst->addOperand(MachineOperand::CreateImm(BitWidth));
> -      Inst->addOperand(MachineOperand::CreateImm(0));
> -      Inst->addOperand(MachineOperand::CreateImm(0));
>       }
>   
>       // Update the destination register class.
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index 3689a36..7d92f88 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -113,6 +113,9 @@ public:
>     /// This function will return false if you pass it a 32-bit instruction.
>     bool hasVALU32BitEncoding(unsigned Opcode) const;
>   
> +  /// \brief Return true if this instruction has any modifiers.
> +  ///  e.g. src[012]_mod, omod, clamp.
> +  bool hasModifiers(unsigned Opcode) const;
>     bool verifyInstruction(const MachineInstr *MI,
>                            StringRef &ErrInfo) const override;
>   
> @@ -208,4 +211,11 @@ namespace SIInstrFlags {
>     };
>   }
>   
> +namespace SISrcMods {
> +  enum {
> +   NEG = 1 << 0,
> +   ABS = 1 << 1
> +  };
> +}
> +
>   #endif //SIINSTRINFO_H
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index b0ac20f..ede7b02 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -159,6 +159,8 @@ def sopp_brtarget : Operand<OtherVT> {
>     let OperandType = "OPERAND_PCREL";
>   }
>   
> +include "SIInstrFormats.td"
> +
>   //===----------------------------------------------------------------------===//
>   // Complex patterns
>   //===----------------------------------------------------------------------===//
> @@ -167,6 +169,9 @@ def MUBUFAddr32 : ComplexPattern<i64, 9, "SelectMUBUFAddr32">;
>   def MUBUFAddr64 : ComplexPattern<i64, 3, "SelectMUBUFAddr64">;
>   def MUBUFScratch : ComplexPattern<i64, 4, "SelectMUBUFScratch">;
>   
> +def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
> +def VOP3Mods  : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
> +
>   //===----------------------------------------------------------------------===//
>   // SI assembler operands
>   //===----------------------------------------------------------------------===//
> @@ -176,7 +181,17 @@ def SIOperand {
>     int VCC = 0x6A;
>   }
>   
> -include "SIInstrFormats.td"
> +def SRCMODS {
> +  int NONE = 0;
> +}
> +
> +def DSTCLAMP {
> +  int NONE = 0;
> +}
> +
> +def DSTOMOD {
> +  int NONE = 0;
> +}
>   
>   //===----------------------------------------------------------------------===//
>   //
> @@ -270,6 +285,29 @@ multiclass SMRD_Helper <bits<5> op, string asm, RegisterClass baseClass,
>   // Vector ALU classes
>   //===----------------------------------------------------------------------===//
>   
> +// This must always be right before the operand being input modified.
> +def InputMods : OperandWithDefaultOps <i32, (ops (i32 0))> {
> +  let PrintMethod = "printOperandAndMods";
> +}
> +def InputModsNoDefault : Operand <i32> {
> +  let PrintMethod = "printOperandAndMods";
> +}
> +
> +def VOP3Constants {
> +
> +  // 32-bit Op 1
> +  dag Op1Out32 = (outs VReg_32:$dst);
> +  dag Op1InMods32 = (ins InputModsNoDefault:$src0_modifiers, VSrc_32:$src0,
> +                         i32imm:$clamp, i32imm:$omod);
> +  dag Op1InNoMods32 = (ins VSrc_32:$src0);
> +
> +  // 64-bit Op 1
> +  dag Op1Out64 = (outs VReg_64:$dst);
> +  dag Op1InMods64 = (ins InputModsNoDefault:$src0_modifiers, VSrc_64:$src0,
> +                         i32imm:$clamp, i32imm:$omod);
> +  dag Op1InNoMods64 = (ins VSrc_64:$src0);
> +}
> +
>   class VOP <string opName> {
>     string OpName = opName;
>   }
> @@ -284,6 +322,17 @@ class SIMCInstr <string pseudo, int subtarget> {
>     int Subtarget = subtarget;
>   }
>   
> +class VOP3DisableFields <bit HasSrc1, bit HasSrc2, bit HasModifiers> {
> +
> +  bits<2> src0_modifiers = !if(HasModifiers, ?, 0);
> +  bits<2> src1_modifiers = !if(HasModifiers, !if(HasSrc1, ?, 0), 0);
> +  bits<2> src2_modifiers = !if(HasModifiers, !if(HasSrc2, ? ,0) ,0);
> +  bits<2> omod = !if(HasModifiers, ?, 0);
> +  bits<1> clamp = !if(HasModifiers, ?, 0);
> +  bits<9> src1 = !if(HasSrc1, ?, 0);
> +  bits<9> src2 = !if(HasSrc2, ?, 0);
> +}
> +
>   class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
>     VOP3Common <outs, ins, "", pattern>,
>     VOP <opName>,
> @@ -296,212 +345,404 @@ class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
>     SIMCInstr<opName, SISubtarget.SI>;
>   
>   multiclass VOP3_m <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern,
> -                   string opName> {
> +                   string opName, bit HasMods = 1> {
>   
>     def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
>   
> -  def _si : VOP3_Real_si <op, outs, ins, asm, opName>;
> +  def _si : VOP3_Real_si <op, outs, ins, asm, opName>,
> +            VOP3DisableFields<1, 1, HasMods>;
>   
>   }
>   
>   multiclass VOP3_1_m <bits<8> op, dag outs, dag ins, string asm,
> -                     list<dag> pattern, string opName> {
> +                     list<dag> pattern, string opName, bit HasMods = 1> {
>   
>     def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
>   
> -  let src1 = 0, src1_modifiers = 0, src2 = 0, src2_modifiers = 0 in {
> +  def _si : VOP3_Real_si <
> +              {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> +              outs, ins, asm, opName>,
> +            VOP3DisableFields<0, 0, HasMods>;
> +}
>   
> -    def _si : VOP3_Real_si <
> -      {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> -      outs, ins, asm, opName
> -    >;
> +multiclass VOP3_2_m <bits<9> op, dag outs, dag ins, string asm,
> +                     list<dag> pattern, string opName, string revOp,
> +                     bit HasMods = 1, bit UseFullOp = 0> {
>   
> -  } // src1 = 0, src1_modifiers = 0, src2 = 0, src2_modifiers = 0
> +  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
> +           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
> +
> +  def _si : VOP3_Real_si <
> +              !if(UseFullOp, op,
> +                  {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}),
> +              outs, ins, asm, opName>,
> +            VOP2_REV<revOp#"_e64_si", !eq(revOp, opName)>,
> +            VOP3DisableFields<1, 0, HasMods>;
> +}
> +
> +multiclass VOP3b_2_m <bits<9> op, dag outs, dag ins, string asm,
> +                      list<dag> pattern, string opName, string revOp,
> +                      bit HasMods = 1, bit UseFullOp = 0> {
> +  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
> +           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
> +
> +  // The VOP2 variant puts the carry out into VCC, the VOP3 variant
> +  // can write it into any SGPR. We currently don't use the carry out,
> +  // so for now hardcode it to VCC as well.
Is this something that should be reversed now, and handled by the instruction
shrinking pass instead?

> +  let sdst = SIOperand.VCC in {
> +    def _si : VOP3b <
> +              !if(UseFullOp, op,
> +                  {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}),
> +              outs, ins, asm, pattern>,
> +              VOP3DisableFields<1, 0, HasMods>,
> +              SIMCInstr<opName, SISubtarget.SI>,
> +              VOP2_REV<revOp#"_e64_si", !eq(revOp, opName)>;
> +  } // End sdst = SIOperand.VCC
>   }
>   
> -multiclass VOP3_2_m <bits<6> op, dag outs, dag ins, string asm,
> -                     list<dag> pattern, string opName, string revOp> {
> +multiclass VOP3_C_m <bits<8> op, dag outs, dag ins, string asm,
> +                     list<dag> pattern, string opName,
> +                     bit HasMods, bit defExec> {
>   
>     def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
>   
> -  let src2 = 0, src2_modifiers = 0 in {
> +//  let Defs = !if(defExec, [EXEC], []) in {
This commented-out code is potentially confusing; please either remove it or add a note explaining why it is kept.

>   
>       def _si : VOP3_Real_si <
> -        {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> -        outs, ins, asm, opName>,
> -        VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
> -
> -  } // src2 = 0, src2_modifiers = 0
> -}
> +                {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> +                outs, ins, asm, opName>,
> +              VOP3DisableFields<1, 0, HasMods> {
> +  let Defs = !if(defExec, [EXEC], []);
> +  }
>   
> -// This must always be right before the operand being input modified.
> -def InputMods : OperandWithDefaultOps <i32, (ops (i32 0))> {
> -  let PrintMethod = "printOperandAndMods";
> +//  } // End Defs = !if(defExec, [EXEC], [])
>   }
>   
>   multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
> -                        string opName, list<dag> pattern> {
> +                        string opName, list<dag> pattern64, bit HasMods = 1,
> +                        list<dag> pattern32 = []> {
>   
>     def _e32 : VOP1 <
>       op, (outs drc:$dst), (ins src:$src0),
> -    opName#"_e32 $dst, $src0", pattern
> +    opName#"_e32 $dst, $src0", pattern32
>     >, VOP <opName>;
>   
>     defm _e64 : VOP3_1_m <
>       op,
>       (outs drc:$dst),
> -    (ins InputMods:$src0_modifiers, src:$src0, i32imm:$clamp, i32imm:$omod),
> -    opName#"_e64 $dst, $src0_modifiers, $clamp, $omod", [], opName>;
> +    !if(HasMods, (ins InputModsNoDefault:$src0_modifiers, src:$src0,
> +         i32imm:$clamp, i32imm:$omod),
> +                 (ins src:$src0)),
> +    opName#"_e64 $dst, "#!if(HasMods, "$src0_modifiers $clamp, $omod", "$src0"),
> +    pattern64, opName,
> +    HasMods
> +  >;
>   }
>   
> +multiclass VOP1InstModHelper <bits<8> op, RegisterClass drc, RegisterClass src,
> +                              ValueType dstVT, ValueType srcVT, string opName,
> +                              SDPatternOperator node,
> +                              bit HasMods> : VOP1_Helper <
> +  op, drc, src, opName,
> +  !if(HasMods, [(set dstVT:$dst,
> +                (node (srcVT (VOP3Mods0 srcVT:$src0, i32:$src0_modifiers,
> +                                        i32:$clamp, i32:$omod))))],
> +               [(set dstVT:$dst, (node srcVT:$src0))]),
> +  HasMods
> +>;
> +
> +multiclass VOP1Inst <bits<8> op, string opName, ValueType dstVT,
> +                     ValueType srcVT = dstVT,
> +                     SDPatternOperator node = null_frag> : VOP1InstModHelper <
I think passing in the entire instruction pattern, rather than the
srcVT, dstVT, and the pattern operator, is much easier to follow. This
becomes more unmanageable when there are more operands with different types.


> +  op,
> +  !if(!eq(dstVT.Size, 32), VReg_32, VReg_64),
> +  !if(!eq(srcVT.Size, 32), VSrc_32, VSrc_64),
> +  dstVT, srcVT,
> +  opName, node,
> +  !if(!eq(srcVT.Value, f32.Value), 1,
> +      !if(!eq(srcVT.Value, f64.Value), 1, 0)) // set HasMods bit for fp sources.
> +>;
> +
>   multiclass VOP1_32 <bits<8> op, string opName, list<dag> pattern>
> -  : VOP1_Helper <op, VReg_32, VSrc_32, opName, pattern>;
> +  : VOP1_Helper <op, VReg_32, VSrc_32, opName, [], 0, pattern>;
>   
>   multiclass VOP1_64 <bits<8> op, string opName, list<dag> pattern>
> -  : VOP1_Helper <op, VReg_64, VSrc_64, opName, pattern>;
> +  : VOP1_Helper <op, VReg_64, VSrc_64, opName, [], 0, pattern>;
>   
>   multiclass VOP1_32_64 <bits<8> op, string opName, list<dag> pattern>
> -  : VOP1_Helper <op, VReg_32, VSrc_64, opName, pattern>;
> +  : VOP1_Helper <op, VReg_32, VSrc_64, opName, [], 0, pattern>;
>   
>   multiclass VOP1_64_32 <bits<8> op, string opName, list<dag> pattern>
> -  : VOP1_Helper <op, VReg_64, VSrc_32, opName, pattern>;
> +  : VOP1_Helper <op, VReg_64, VSrc_32, opName, [], 0, pattern>;
> +
> +class VOP2_e32 <bits<6> op, RegisterClass vrc, RegisterClass arc,
> +                string opName, list<dag> pattern, string revOp = opName> :
> +  VOP2 <op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1),
> +        opName#"_e32 $dst, $src0, $src1", pattern>,
> +  VOP <opName>,
> +  VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
>   
> -multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
> -                        string opName, list<dag> pattern, string revOp> {
> -  def _e32 : VOP2 <
> -    op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1),
> -    opName#"_e32 $dst, $src0, $src1", pattern
> -  >, VOP <opName>, VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
> +multiclass VOP2_Helper <bits<9> op, RegisterClass vrc, RegisterClass arc,
> +                        string opName, list<dag> pattern64,
> +                        bit HasMods, string revOp, list<dag> pattern32 = []> {
> +  def _e32 : VOP2_e32 <op{5-0}, vrc, arc, opName,  pattern32, revOp>;
>   
>     defm _e64 : VOP3_2_m <
>       op,
>       (outs vrc:$dst),
> -    (ins InputMods:$src0_modifiers, arc:$src0,
> -         InputMods:$src1_modifiers, arc:$src1,
> -         i32imm:$clamp, i32imm:$omod),
> -    opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod", [],
> -    opName, revOp>;
> +    !if(HasMods, (ins InputModsNoDefault:$src0_modifiers, arc:$src0,
> +                       InputModsNoDefault:$src1_modifiers, arc:$src1,
> +                       i32imm:$clamp, i32imm:$omod),
> +                  (ins arc:$src0, arc:$src1)),
> +    opName#"_e64 $dst, "#!if(HasMods, "$src0_modifiers, $src1_modifiers, "#
> +                                                       "$clamp, $omod",
> +                                      "$src0, $src1"),
> +    pattern64, opName, revOp, HasMods>;
>   }
>   
> -multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern,
> +multiclass VOP2InstModHelper <bits<9> op, RegisterClass drc, RegisterClass src,
> +                              ValueType dstVT, ValueType srcVT, string opName,
> +                              SDPatternOperator node,
> +                              bit HasMods, string revOp> : VOP2_Helper <
> +  op, drc, src, opName,
> +  !if(HasMods, [(set dstVT:$dst,
> +                (node (srcVT (VOP3Mods0 srcVT:$src0, i32:$src0_modifiers,
> +                                        i32:$clamp, i32:$omod)),
> +                       (srcVT (VOP3Mods srcVT:$src1, i32:$src1_modifiers))))],
> +               [(set dstVT:$dst, (node srcVT:$src0, srcVT:$src1))]),
> +  HasMods, revOp
> +>;
> +
> +multiclass VOP2Inst <bits<9> op, string opName, ValueType dstVT,
> +                     ValueType srcVT = dstVT,
> +                     SDPatternOperator node = null_frag,
> +                     string revOp = opName> :
> +                     VOP2InstModHelper <
> +  op,
> +  !if(!eq(dstVT.Size, 32), VReg_32, VReg_64),
> +  !if(!eq(srcVT.Size, 32), VSrc_32, VSrc_64),
> +  dstVT, srcVT,
> +  opName,
> +  node,
> +  !if(!eq(srcVT.Value, f32.Value), 1,
> +      !if(!eq(srcVT.Value, f64.Value), 1, 0)), // set HasMods bit for fp sources.
> +  revOp
> +>;
> +
> +multiclass VOP2_32 <bits<9> op, string opName, list<dag> pattern32,
>                       string revOp = opName>
> -  : VOP2_Helper <op, VReg_32, VSrc_32, opName, pattern, revOp>;
> +  : VOP2_Helper <op, VReg_32, VSrc_32, opName, [], 0, revOp, pattern32>;
>   
> -multiclass VOP2_64 <bits<6> op, string opName, list<dag> pattern,
> +multiclass VOP2_64 <bits<9> op, string opName, list<dag> pattern32,
>                       string revOp = opName>
> -  : VOP2_Helper <op, VReg_64, VSrc_64, opName, pattern, revOp>;
> +  : VOP2_Helper <op, VReg_64, VSrc_64, opName, [], 0, revOp, pattern32>;
>   
> -multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern,
> -                     RegisterClass src0_rc, string revOp = opName> {
>   
> -  def _e32 : VOP2 <
> -    op, (outs VReg_32:$dst), (ins src0_rc:$src0, VReg_32:$src1),
> -    opName#"_e32 $dst, $src0, $src1", pattern
> -  >, VOP <opName>, VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
> +multiclass VOP2b_Helper <bits<9> op, string opName, RegisterClass vrc,
> +                         RegisterClass arc, list<dag> pattern64,
> +                         bit HasMods, string revOp, list<dag> pattern32 = []> {
> +  def _e32 : VOP2_e32 <op{5-0}, vrc, arc, opName, pattern32, revOp>;
>   
> -  def _e64 : VOP3b <
> -    {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> -    (outs VReg_32:$dst),
> -    (ins InputMods: $src0_modifiers, VSrc_32:$src0,
> -         InputMods:$src1_modifiers, VSrc_32:$src1,
> -         i32imm:$clamp, i32imm:$omod),
> -    opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod", []
> -  >, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
> -    let src2 = 0;
> -    let src2_modifiers = 0;
> -    /* the VOP2 variant puts the carry out into VCC, the VOP3 variant
> -       can write it into any SGPR. We currently don't use the carry out,
> -       so for now hardcode it to VCC as well */
> -    let sdst = SIOperand.VCC;
> -  }
> +  defm _e64 : VOP3b_2_m <
> +    op,
> +    (outs vrc:$dst),
> +    !if(HasMods, (ins InputModsNoDefault:$src0_modifiers, arc:$src0,
> +                       InputModsNoDefault:$src1_modifiers, arc:$src1,
> +                       i32imm:$clamp, i32imm:$omod),
> +                  (ins arc:$src0, arc:$src1)),
> +    opName#"_e64 $dst, "#!if(HasMods, "$src0_modifiers, $src1_modifiers, "#
> +                                                       "$clamp, $omod",
> +                                      "$src0, $src1"),
> +    pattern64, opName, revOp, HasMods>;
>   }
>   
> +multiclass VOP2bInstModHelper <bits<9> op, string opName, RegisterClass drc,
> +                               RegisterClass src, ValueType dstVT,
> +                               ValueType srcVT, SDPatternOperator node,
> +                               bit HasMods, string revOp = opName> : VOP2b_Helper <
> +  op, opName, drc, src,
> +  !if(HasMods, [(set dstVT:$dst,
> +                (node (srcVT (VOP3Mods0 srcVT:$src0, i32:$src0_modifiers,
> +                                        i32:$clamp, i32:$omod)),
> +                       (srcVT (VOP3Mods srcVT:$src1, i32:$src1_modifiers))))],
> +               [(set dstVT:$dst, (node srcVT:$src0, srcVT:$src1))]),
> +  HasMods, revOp
> +>;
> +
> +multiclass VOP2bInst <bits<9> op, string opName, ValueType dstVT,
> +                     ValueType srcVT = dstVT,
> +                     SDPatternOperator node = null_frag,
> +                     string revOp = opName> :
> +                     VOP2bInstModHelper <
> +  op, opName,
> +  !if(!eq(dstVT.Size, 32), VReg_32, VReg_64),
> +  !if(!eq(srcVT.Size, 32), VSrc_32, VSrc_64),
> +  dstVT, srcVT,
> +  node,
> +  !if(!eq(srcVT.Value, f32.Value), 1,
> +      !if(!eq(srcVT.Value, f64.Value), 1, 0)), // set HasMods bit for fp sources.
> +  revOp
> +>;
> +
> +
>   multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
> -                        string opName, ValueType vt, PatLeaf cond, bit defExec = 0> {
> +                        string opName, list<dag> pattern64,
> +                        bit HasMods, bit defExec, list<dag> pattern32 = []> {
>     def _e32 : VOPC <
>       op, (ins arc:$src0, vrc:$src1),
> -    opName#"_e32 $dst, $src0, $src1", []
> +    opName#"_e32 $dst, $src0, $src1", pattern32
>     >, VOP <opName> {
>       let Defs = !if(defExec, [EXEC], []);
>     }
>   
> -  def _e64 : VOP3 <
> -    {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> +  defm _e64 : VOP3_C_m <
> +    op,
>       (outs SReg_64:$dst),
> -    (ins InputMods:$src0_modifiers, arc:$src0,
> -         InputMods:$src1_modifiers, arc:$src1,
> -         InstFlag:$clamp, InstFlag:$omod),
> -    opName#"_e64 $dst, $src0_modifiers, $src1_modifiers, $clamp, $omod",
> -    !if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
> -      [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
> -    )
> -  >, VOP <opName> {
> -    let Defs = !if(defExec, [EXEC], []);
> -    let src2 = 0;
> -    let src2_modifiers = 0;
> -  }
> +    !if(HasMods, (ins InputModsNoDefault:$src0_modifiers, arc:$src0,
> +                      InputModsNoDefault:$src1_modifiers, arc:$src1,
> +                      i32imm:$clamp, i32imm:$omod),
> +                  (ins arc:$src0, arc:$src1)),
> +    opName#"_e64 $dst, "#!if(HasMods, "$src0_modifiers, $src1_modifiers, "#
> +                                                       "$clamp, $omod",
> +                                      "$src0, $src1"),
> +    pattern64, opName, HasMods, defExec>;
>   }
>   
> +multiclass VOPCInstModHelper <bits<8> op, RegisterClass drc, RegisterClass src,
> +                              ValueType VT, string opName,
> +                              SDPatternOperator cond,
> +                              bit HasMods = 1, bit defExec = 0> : VOPC_Helper <
> +  op, drc, src, opName,
> +  !if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
> +    !if(HasMods, [(set i1:$dst,
> +                  (setcc (VT (VOP3Mods0 VT:$src0, i32:$src0_modifiers,
> +                                            i32:$clamp, i32:$omod)),
> +                             (VT (VOP3Mods VT:$src1, i32:$src1_modifiers)),
> +                             cond))],
> +                 [(set i1:$dst, (setcc VT:$src0, VT:$src1, cond))])),
> +  HasMods,
> +  defExec
> +>;
> +
> +multiclass VOPCInst <bits<8> op, string opName, ValueType VT,
> +                     SDPatternOperator cond = null_frag,
> +                     bit HasMods = 1,
> +                     bit defExec = 0> :
> +                     VOPCInstModHelper <
> +  op,
> +  !if(!eq(VT.Size, 32), VReg_32, VReg_64),
> +  !if(!eq(VT.Size, 32), VSrc_32, VSrc_64),
> +  VT,
> +  opName,
> +  cond,
> +  !if(!eq(VT.Value, f32.Value), 1,
> +      !if(!eq(VT.Value, f64.Value), 1, 0)), // set HasMods bit for fp sources.
> +  defExec
> +>;
> +
>   multiclass VOPC_32 <bits<8> op, string opName,
>     ValueType vt = untyped, PatLeaf cond = COND_NULL>
> -  : VOPC_Helper <op, VReg_32, VSrc_32, opName, vt, cond>;
> +  : VOPCInstModHelper <op, VReg_32, VSrc_32, vt, opName, cond,
> +                       !eq(vt.Value, f32.Value)>;
>   
>   multiclass VOPC_64 <bits<8> op, string opName,
>     ValueType vt = untyped, PatLeaf cond = COND_NULL>
> -  : VOPC_Helper <op, VReg_64, VSrc_64, opName, vt, cond>;
> +  : VOPCInstModHelper <op, VReg_64, VSrc_64, vt, opName, cond,
> +                       !eq(vt.Value, f64.Value)>;
>   
>   multiclass VOPCX_32 <bits<8> op, string opName,
>     ValueType vt = untyped, PatLeaf cond = COND_NULL>
> -  : VOPC_Helper <op, VReg_32, VSrc_32, opName, vt, cond, 1>;
> +  : VOPCInstModHelper <op, VReg_32, VSrc_32, vt, opName, cond,
> +                       !eq(vt.Value, f32.Value), 1>;
>   
>   multiclass VOPCX_64 <bits<8> op, string opName,
>     ValueType vt = untyped, PatLeaf cond = COND_NULL>
> -  : VOPC_Helper <op, VReg_64, VSrc_64, opName, vt, cond, 1>;
> +  : VOPCInstModHelper <op, VReg_64, VSrc_64, vt, opName, cond, 1, 1>;
>   
>   multiclass VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3_m <
>     op, (outs VReg_32:$dst),
> -  (ins InputMods: $src0_modifiers, VSrc_32:$src0, InputMods:$src1_modifiers,
> +  (ins InputMods:$src0_modifiers, VSrc_32:$src0, InputMods:$src1_modifiers,
>      VSrc_32:$src1, InputMods:$src2_modifiers, VSrc_32:$src2,
>      InstFlag:$clamp, InstFlag:$omod),
>     opName#" $dst, $src0_modifiers, $src1, $src2, $clamp, $omod", pattern, opName
>   >;
>   
> -class VOP3_64_32 <bits <9> op, string opName, list<dag> pattern> : VOP3 <
> +multiclass VOP3_Helper <bits<9> op, RegisterClass vrc, RegisterClass arc,
> +                       string opName, list<dag> pattern, int HasMods> : VOP3_m <
> +    op,
> +    (outs vrc:$dst),
> +    !if(HasMods, (ins InputModsNoDefault:$src0_modifiers, arc:$src0,
> +                      InputModsNoDefault:$src1_modifiers, arc:$src1,
> +                      InputModsNoDefault:$src2_modifiers, arc:$src2,
> +                      i32imm:$clamp, i32imm:$omod),
> +                  (ins arc:$src0, arc:$src1, arc:$src2)),
> +    opName#" $dst, "#!if(HasMods, "$src0_modifiers, $src1_modifiers, "#
> +                                  "$src2_modifiers $clamp, $omod",
> +                                  "$src0, $src1, $src2"),
> +    pattern, opName, HasMods>;
> +
> +multiclass VOP3InstModHelper <bits<9> op, RegisterClass drc, RegisterClass src,
> +                              ValueType dstVT, ValueType srcVT, string opName,
> +                              SDPatternOperator node, bit HasMods> :
> +                              VOP3_Helper <
> +  op, drc, src, opName,
> +  !if(HasMods, [(set dstVT:$dst,
> +                (node (srcVT (VOP3Mods0 srcVT:$src0, i32:$src0_modifiers,
> +                                        i32:$clamp, i32:$omod)),
> +                       (srcVT (VOP3Mods srcVT:$src1, i32:$src1_modifiers)),
> +                       (srcVT (VOP3Mods srcVT:$src2, i32:$src2_modifiers))))],
> +               [(set dstVT:$dst, (node srcVT:$src0, srcVT:$src1, srcVT:$src2))]),
> +  HasMods
> +>;
> +
> +multiclass VOP3Inst <bits<9> op, string opName, ValueType dstVT,
> +                        ValueType srcVT = dstVT,
> +                        SDPatternOperator node = null_frag,
> +                        RegisterClass dstRC =
> +                          !if(!eq(dstVT.Size, 32), VReg_32, VReg_64),
> +                        RegisterClass srcRC =
> +                          !if(!eq(srcVT.Size, 32), VSrc_32, VSrc_64)> :
> +                    VOP3InstModHelper <
> +  op, dstRC, srcRC, dstVT, srcVT, opName, node,
> +    !if(!eq(srcVT.Value, f32.Value), 1,
> +      !if(!eq(srcVT.Value, f64.Value), 1, 0)) // set HasMods bit for fp sources.
> +>;
> +
> +multiclass VOP3_64_32 <bits <9> op, string opName, ValueType VT64,
> +                       ValueType VT32, SDPatternOperator node = null_frag> :
> +  VOP3_2_m <
>     op, (outs VReg_64:$dst),
>     (ins VSrc_64:$src0, VSrc_32:$src1),
> -  opName#" $dst, $src0, $src1", pattern
> ->, VOP <opName> {
> +  opName#" $dst, $src0, $src1",
> +  [(set VT64:$dst, (node VT64:$src0, VT32:$src1))] , opName, opName, 0, 1
> +>;
>   
> -  let src2 = 0;
> -  let src2_modifiers = 0;
> -  let src0_modifiers = 0;
> -  let clamp = 0;
> -  let omod = 0;
> -}
> +multiclass VOP3_64 <bits<9> op, string opName, list<dag> pattern> :
>   
> -class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
> +  VOP3_2_m <
>     op, (outs VReg_64:$dst),
>     (ins InputMods:$src0_modifiers, VSrc_64:$src0,
>          InputMods:$src1_modifiers, VSrc_64:$src1,
>          InputMods:$src2_modifiers, VSrc_64:$src2,
>          InstFlag:$clamp, InstFlag:$omod),
> -  opName#" $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers, $clamp, $omod", pattern
> ->, VOP <opName>;
> -
> +  opName#" $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers, $clamp, $omod", pattern,
> +  opName, opName, 1, 1
> +>;
>   
> -class VOP3b_Helper <bits<9> op, RegisterClass vrc, RegisterClass arc,
> -                    string opName, list<dag> pattern> : VOP3 <
> +multiclass VOP3b_Helper <bits<9> op, RegisterClass vrc, RegisterClass arc,
> +                    string opName, list<dag> pattern> :
> +  VOP3b_2_m <
>     op, (outs vrc:$dst0, SReg_64:$dst1),
>     (ins arc:$src0, arc:$src1, arc:$src2,
>      InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
> -  opName#" $dst0, $dst1, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
> ->, VOP <opName>;
> -
> +  opName#" $dst0, $dst1, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern,
> +  opName, opName, 1, 1
> +>;
>   
> -class VOP3b_64 <bits<9> op, string opName, list<dag> pattern> :
> +multiclass VOP3b_64 <bits<9> op, string opName, list<dag> pattern> :
>     VOP3b_Helper <op, VReg_64, VSrc_64, opName, pattern>;
>   
> -class VOP3b_32 <bits<9> op, string opName, list<dag> pattern> :
> +multiclass VOP3b_32 <bits<9> op, string opName, list<dag> pattern> :
>     VOP3b_Helper <op, VReg_32, VSrc_32, opName, pattern>;
>   
>   //===----------------------------------------------------------------------===//
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index aecd847..3ccc132 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -1048,7 +1048,7 @@ defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "IMAGE_SAMPLE_C_CD_CL_O"
>   //def V_NOP : VOP1_ <0x00000000, "V_NOP", []>;
>   
>   let neverHasSideEffects = 1, isMoveImm = 1 in {
> -defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
> +defm V_MOV_B32 : VOP1Inst <0x00000001, "V_MOV_B32", i32>;
>   } // End neverHasSideEffects = 1, isMoveImm = 1
>   
>   let Uses = [EXEC] in {
> @@ -1063,113 +1063,113 @@ def V_READFIRSTLANE_B32 : VOP1 <
>   
>   }
>   
> -defm V_CVT_I32_F64 : VOP1_32_64 <0x00000003, "V_CVT_I32_F64",
> -  [(set i32:$dst, (fp_to_sint f64:$src0))]
> +defm V_CVT_I32_F64 : VOP1Inst <0x00000003, "V_CVT_I32_F64",
> +  i32, f64, fp_to_sint
>   >;
> -defm V_CVT_F64_I32 : VOP1_64_32 <0x00000004, "V_CVT_F64_I32",
> -  [(set f64:$dst, (sint_to_fp i32:$src0))]
> +defm V_CVT_F64_I32 : VOP1Inst <0x00000004, "V_CVT_F64_I32",
> +  f64, i32, sint_to_fp
>   >;
> -defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32",
> -  [(set f32:$dst, (sint_to_fp i32:$src0))]
> +defm V_CVT_F32_I32 : VOP1Inst <0x00000005, "V_CVT_F32_I32",
> +  f32, i32, sint_to_fp
>   >;
> -defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32",
> -  [(set f32:$dst, (uint_to_fp i32:$src0))]
> +defm V_CVT_F32_U32 : VOP1Inst <0x00000006, "V_CVT_F32_U32",
> +  f32, i32, uint_to_fp
>   >;
> -defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32",
> -  [(set i32:$dst, (fp_to_uint f32:$src0))]
> +defm V_CVT_U32_F32 : VOP1Inst <0x00000007, "V_CVT_U32_F32",
> +  i32, f32, fp_to_uint
>   >;
> -defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32",
> -  [(set i32:$dst, (fp_to_sint f32:$src0))]
> +defm V_CVT_I32_F32 : VOP1Inst <0x00000008, "V_CVT_I32_F32",
> +  i32, f32, fp_to_sint
>   >;
>   defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>;
> -defm V_CVT_F16_F32 : VOP1_32 <0x0000000a, "V_CVT_F16_F32",
> -  [(set i32:$dst, (fp_to_f16 f32:$src0))]
> +defm V_CVT_F16_F32 : VOP1Inst <0x0000000a, "V_CVT_F16_F32",
> +  i32, f32, fp_to_f16
>   >;
> -defm V_CVT_F32_F16 : VOP1_32 <0x0000000b, "V_CVT_F32_F16",
> -  [(set f32:$dst, (f16_to_fp i32:$src0))]
> +defm V_CVT_F32_F16 : VOP1Inst <0x0000000b, "V_CVT_F32_F16",
> +  f32, i32, f16_to_fp
>   >;
>   //defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>;
>   //defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>;
>   //defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>;
> -defm V_CVT_F32_F64 : VOP1_32_64 <0x0000000f, "V_CVT_F32_F64",
> -  [(set f32:$dst, (fround f64:$src0))]
> +defm V_CVT_F32_F64 : VOP1Inst <0x0000000f, "V_CVT_F32_F64",
> +  f32, f64, fround
>   >;
> -defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32",
> -  [(set f64:$dst, (fextend f32:$src0))]
> +defm V_CVT_F64_F32 : VOP1Inst <0x00000010, "V_CVT_F64_F32",
> +  f64, f32, fextend
>   >;
> -defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0",
> -  [(set f32:$dst, (AMDGPUcvt_f32_ubyte0 i32:$src0))]
> +defm V_CVT_F32_UBYTE0 : VOP1Inst <0x00000011, "V_CVT_F32_UBYTE0",
> +  f32, i32, AMDGPUcvt_f32_ubyte0
>   >;
> -defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1",
> -  [(set f32:$dst, (AMDGPUcvt_f32_ubyte1 i32:$src0))]
> +defm V_CVT_F32_UBYTE1 : VOP1Inst <0x00000012, "V_CVT_F32_UBYTE1",
> +  f32, i32, AMDGPUcvt_f32_ubyte1
>   >;
> -defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2",
> -  [(set f32:$dst, (AMDGPUcvt_f32_ubyte2 i32:$src0))]
> +defm V_CVT_F32_UBYTE2 : VOP1Inst <0x00000013, "V_CVT_F32_UBYTE2",
> +  f32, i32, AMDGPUcvt_f32_ubyte2
>   >;
> -defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3",
> -  [(set f32:$dst, (AMDGPUcvt_f32_ubyte3 i32:$src0))]
> +defm V_CVT_F32_UBYTE3 : VOP1Inst <0x00000014, "V_CVT_F32_UBYTE3",
> +  f32, i32, AMDGPUcvt_f32_ubyte3
>   >;
> -defm V_CVT_U32_F64 : VOP1_32_64 <0x00000015, "V_CVT_U32_F64",
> -  [(set i32:$dst, (fp_to_uint f64:$src0))]
> +defm V_CVT_U32_F64 : VOP1Inst <0x00000015, "V_CVT_U32_F64",
> +  i32, f64, fp_to_uint
>   >;
> -defm V_CVT_F64_U32 : VOP1_64_32 <0x00000016, "V_CVT_F64_U32",
> -  [(set f64:$dst, (uint_to_fp i32:$src0))]
> +defm V_CVT_F64_U32 : VOP1Inst <0x00000016, "V_CVT_F64_U32",
> +  f64, i32, uint_to_fp
>   >;
>   
> -defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32",
> -  [(set f32:$dst, (AMDGPUfract f32:$src0))]
> +defm V_FRACT_F32 : VOP1Inst <0x00000020, "V_FRACT_F32",
> +  f32, f32, AMDGPUfract
>   >;
> -defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32",
> -  [(set f32:$dst, (ftrunc f32:$src0))]
> +defm V_TRUNC_F32 : VOP1Inst <0x00000021, "V_TRUNC_F32",
> +  f32, f32, ftrunc
>   >;
> -defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32",
> -  [(set f32:$dst, (fceil f32:$src0))]
> +defm V_CEIL_F32 : VOP1Inst <0x00000022, "V_CEIL_F32",
> +  f32, f32, fceil
>   >;
> -defm V_RNDNE_F32 : VOP1_32 <0x00000023, "V_RNDNE_F32",
> -  [(set f32:$dst, (frint f32:$src0))]
> +defm V_RNDNE_F32 : VOP1Inst <0x00000023, "V_RNDNE_F32",
> +  f32, f32, frint
>   >;
> -defm V_FLOOR_F32 : VOP1_32 <0x00000024, "V_FLOOR_F32",
> -  [(set f32:$dst, (ffloor f32:$src0))]
> +defm V_FLOOR_F32 : VOP1Inst <0x00000024, "V_FLOOR_F32",
> +  f32, f32, ffloor
>   >;
> -defm V_EXP_F32 : VOP1_32 <0x00000025, "V_EXP_F32",
> -  [(set f32:$dst, (fexp2 f32:$src0))]
> +defm V_EXP_F32 : VOP1Inst <0x00000025, "V_EXP_F32",
> +  f32, f32, fexp2
>   >;
>   defm V_LOG_CLAMP_F32 : VOP1_32 <0x00000026, "V_LOG_CLAMP_F32", []>;
> -defm V_LOG_F32 : VOP1_32 <0x00000027, "V_LOG_F32",
> -  [(set f32:$dst, (flog2 f32:$src0))]
> +defm V_LOG_F32 : VOP1Inst <0x00000027, "V_LOG_F32",
> +  f32, f32, flog2
>   >;
>   
>   defm V_RCP_CLAMP_F32 : VOP1_32 <0x00000028, "V_RCP_CLAMP_F32", []>;
>   defm V_RCP_LEGACY_F32 : VOP1_32 <0x00000029, "V_RCP_LEGACY_F32", []>;
> -defm V_RCP_F32 : VOP1_32 <0x0000002a, "V_RCP_F32",
> -  [(set f32:$dst, (AMDGPUrcp f32:$src0))]
> +defm V_RCP_F32 : VOP1Inst <0x0000002a, "V_RCP_F32",
> +  f32, f32, AMDGPUrcp
>   >;
>   defm V_RCP_IFLAG_F32 : VOP1_32 <0x0000002b, "V_RCP_IFLAG_F32", []>;
> -defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32",
> -  [(set f32:$dst, (AMDGPUrsq_clamped f32:$src0))]
> +defm V_RSQ_CLAMP_F32 : VOP1Inst <0x0000002c, "V_RSQ_CLAMP_F32",
> +  f32, f32, AMDGPUrsq_clamped
>   >;
> -defm V_RSQ_LEGACY_F32 : VOP1_32 <
> +defm V_RSQ_LEGACY_F32 : VOP1Inst <
>     0x0000002d, "V_RSQ_LEGACY_F32",
> -  [(set f32:$dst, (AMDGPUrsq_legacy f32:$src0))]
> +  f32, f32, AMDGPUrsq_legacy
>   >;
> -defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32",
> -  [(set f32:$dst, (AMDGPUrsq f32:$src0))]
> +defm V_RSQ_F32 : VOP1Inst <0x0000002e, "V_RSQ_F32",
> +  f32, f32, AMDGPUrsq
>   >;
> -defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64",
> -  [(set f64:$dst, (AMDGPUrcp f64:$src0))]
> +defm V_RCP_F64 : VOP1Inst <0x0000002f, "V_RCP_F64",
> +  f64, f64, AMDGPUrcp
>   >;
> -defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>;
> -defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64",
> -  [(set f64:$dst, (AMDGPUrsq f64:$src0))]
> +defm V_RCP_CLAMP_F64 : VOP1_32 <0x00000030, "V_RCP_CLAMP_F64", []>;
> +defm V_RSQ_F64 : VOP1Inst <0x00000031, "V_RSQ_F64",
> +  f64, f64, AMDGPUrsq
>   >;
> -defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64",
> -  [(set f64:$dst, (AMDGPUrsq_clamped f64:$src0))]
> +defm V_RSQ_CLAMP_F64 : VOP1Inst <0x00000032, "V_RSQ_CLAMP_F64",
> +  f64, f64, AMDGPUrsq_clamped
>   >;
> -defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32",
> -  [(set f32:$dst, (fsqrt f32:$src0))]
> +defm V_SQRT_F32 : VOP1Inst <0x00000033, "V_SQRT_F32",
> +  f32, f32, fsqrt
>   >;
> -defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64",
> -  [(set f64:$dst, (fsqrt f64:$src0))]
> +defm V_SQRT_F64 : VOP1Inst <0x00000034, "V_SQRT_F64",
> +  f64, f64, fsqrt
>   >;
>   defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32",
>     [(set f32:$dst, (AMDGPUsin f32:$src0))]
> @@ -1183,8 +1183,8 @@ defm V_FFBH_U32 : VOP1_32 <0x00000039, "V_FFBH_U32", []>;
>   defm V_FFBL_B32 : VOP1_32 <0x0000003a, "V_FFBL_B32", []>;
>   defm V_FFBH_I32 : VOP1_32 <0x0000003b, "V_FFBH_I32", []>;
>   //defm V_FREXP_EXP_I32_F64 : VOP1_32 <0x0000003c, "V_FREXP_EXP_I32_F64", []>;
> -defm V_FREXP_MANT_F64 : VOP1_64 <0x0000003d, "V_FREXP_MANT_F64", []>;
> -defm V_FRACT_F64 : VOP1_64 <0x0000003e, "V_FRACT_F64", []>;
> +defm V_FREXP_MANT_F64 : VOP1_32 <0x0000003d, "V_FREXP_MANT_F64", []>;
> +defm V_FRACT_F64 : VOP1_32 <0x0000003e, "V_FRACT_F64", []>;
>   //defm V_FREXP_EXP_I32_F32 : VOP1_32 <0x0000003f, "V_FREXP_EXP_I32_F32", []>;
>   defm V_FREXP_MANT_F32 : VOP1_32 <0x00000040, "V_FREXP_MANT_F32", []>;
>   //def V_CLREXCP : VOP1_ <0x00000041, "V_CLREXCP", []>;
> @@ -1267,92 +1267,90 @@ def V_WRITELANE_B32 : VOP2 <
>   >;
>   
>   let isCommutable = 1 in {
> -defm V_ADD_F32 : VOP2_32 <0x00000003, "V_ADD_F32",
> -  [(set f32:$dst, (fadd f32:$src0, f32:$src1))]
> +defm V_ADD_F32 : VOP2Inst <0x00000003, "V_ADD_F32",
> +  f32, f32, fadd
>   >;
>   
> -defm V_SUB_F32 : VOP2_32 <0x00000004, "V_SUB_F32",
> -  [(set f32:$dst, (fsub f32:$src0, f32:$src1))]
> +defm V_SUB_F32 : VOP2Inst <0x00000004, "V_SUB_F32", f32, f32, fsub>;
> +defm V_SUBREV_F32 : VOP2Inst <
> +  0x00000005, "V_SUBREV_F32", f32, f32, null_frag, "V_SUB_F32"
>   >;
> -defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", [], "V_SUB_F32">;
>   } // End isCommutable = 1
>   
>   defm V_MAC_LEGACY_F32 : VOP2_32 <0x00000006, "V_MAC_LEGACY_F32", []>;
>   
>   let isCommutable = 1 in {
>   
> -defm V_MUL_LEGACY_F32 : VOP2_32 <
> +defm V_MUL_LEGACY_F32 : VOP2Inst <
>     0x00000007, "V_MUL_LEGACY_F32",
> -  [(set f32:$dst, (int_AMDGPU_mul f32:$src0, f32:$src1))]
> +  f32, f32, int_AMDGPU_mul
>   >;
>   
> -defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
> -  [(set f32:$dst, (fmul f32:$src0, f32:$src1))]
> +defm V_MUL_F32 : VOP2Inst <0x00000008, "V_MUL_F32",
> +  f32, f32, fmul
>   >;
>   
>   
> -defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
> -  [(set i32:$dst, (AMDGPUmul_i24 i32:$src0, i32:$src1))]
> +defm V_MUL_I32_I24 : VOP2Inst <0x00000009, "V_MUL_I32_I24",
> +  i32, i32, AMDGPUmul_i24
>   >;
>   //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
> -defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
> -  [(set i32:$dst, (AMDGPUmul_u24 i32:$src0, i32:$src1))]
> +defm V_MUL_U32_U24 : VOP2Inst <0x0000000b, "V_MUL_U32_U24",
> +  i32, i32, AMDGPUmul_u24
>   >;
>   //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
>   
>   
> -defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
> -  [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]
> +defm V_MIN_LEGACY_F32 : VOP2Inst <0x0000000d, "V_MIN_LEGACY_F32",
> +  f32, f32, AMDGPUfmin
>   >;
>   
> -defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32",
> -  [(set f32:$dst, (AMDGPUfmax f32:$src0, f32:$src1))]
> +defm V_MAX_LEGACY_F32 : VOP2Inst <0x0000000e, "V_MAX_LEGACY_F32",
> +  f32, f32, AMDGPUfmax
>   >;
>   
>   defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>;
>   defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>;
> -defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32",
> -  [(set i32:$dst, (AMDGPUsmin i32:$src0, i32:$src1))]>;
> -defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32",
> -  [(set i32:$dst, (AMDGPUsmax i32:$src0, i32:$src1))]>;
> -defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32",
> -  [(set i32:$dst, (AMDGPUumin i32:$src0, i32:$src1))]>;
> -defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32",
> -  [(set i32:$dst, (AMDGPUumax i32:$src0, i32:$src1))]>;
> -
> -defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32",
> -  [(set i32:$dst, (srl i32:$src0, i32:$src1))]
> ->;
> +defm V_MIN_I32 : VOP2Inst <0x00000011, "V_MIN_I32", i32, i32, AMDGPUsmin>;
> +defm V_MAX_I32 : VOP2Inst <0x00000012, "V_MAX_I32", i32, i32, AMDGPUsmax>;
> +defm V_MIN_U32 : VOP2Inst <0x00000013, "V_MIN_U32", i32, i32, AMDGPUumin>;
> +defm V_MAX_U32 : VOP2Inst <0x00000014, "V_MAX_U32", i32, i32, AMDGPUumax>;
>   
> -defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", [], "V_LSHR_B32">;
> +defm V_LSHR_B32 : VOP2Inst <0x00000015, "V_LSHR_B32", i32, i32, srl>;
>   
> -defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32",
> -  [(set i32:$dst, (sra i32:$src0, i32:$src1))]
> +defm V_LSHRREV_B32 : VOP2Inst <
> +  0x00000016, "V_LSHRREV_B32", i32, i32, null_frag, "V_LSHR_B32"
> +>;
> +
> +defm V_ASHR_I32 : VOP2Inst <0x00000017, "V_ASHR_I32",
> +  i32, i32, sra
> +>;
> +defm V_ASHRREV_I32 : VOP2Inst <
> +  0x00000018, "V_ASHRREV_I32", i32, i32, null_frag, "V_ASHR_I32"
>   >;
> -defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", [], "V_ASHR_I32">;
>   
>   let hasPostISelHook = 1 in {
>   
> -defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32",
> -  [(set i32:$dst, (shl i32:$src0, i32:$src1))]
> ->;
> +defm V_LSHL_B32 : VOP2Inst <0x00000019, "V_LSHL_B32", i32, i32, shl>;
>   
>   }
> -defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">;
> +defm V_LSHLREV_B32 : VOP2Inst <
> +  0x0000001a, "V_LSHLREV_B32", i32, i32, null_frag, "V_LSHL_B32"
> +>;
>   
> -defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32",
> -  [(set i32:$dst, (and i32:$src0, i32:$src1))]>;
> -defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32",
> -  [(set i32:$dst, (or i32:$src0, i32:$src1))]
> +defm V_AND_B32 : VOP2Inst <0x0000001b, "V_AND_B32",
> +  i32, i32, and>;
> +defm V_OR_B32 : VOP2Inst <0x0000001c, "V_OR_B32",
> +  i32, i32, or
>   >;
> -defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32",
> -  [(set i32:$dst, (xor i32:$src0, i32:$src1))]
> +defm V_XOR_B32 : VOP2Inst <0x0000001d, "V_XOR_B32",
> +  i32, i32, xor
>   >;
>   
>   } // End isCommutable = 1
>   
> -defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32",
> -  [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))]>;
> +defm V_BFM_B32 : VOP2Inst <0x0000001e, "V_BFM_B32",
> +  i32, i32, AMDGPUbfm>;
>   defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
>   defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
>   defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
> @@ -1363,20 +1361,27 @@ defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
>   let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
>   // No patterns so that the scalar instructions are always selected.
>   // The scalar versions will be replaced with vector when needed later.
> -defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
> -  [(set i32:$dst, (add i32:$src0, i32:$src1))], VSrc_32>;
> -defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
> -  [(set i32:$dst, (sub i32:$src0, i32:$src1))], VSrc_32>;
> -defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], VSrc_32,
> -                              "V_SUB_I32">;
> +defm V_ADD_I32 : VOP2bInstModHelper <0x00000025, "V_ADD_I32",
> +  VReg_32, VSrc_32, i32, i32, add, 0
> +>;
> +defm V_SUB_I32 : VOP2bInstModHelper <0x00000026, "V_SUB_I32",
> +  VReg_32, VSrc_32, i32, i32, sub, 0
> +>;
> +defm V_SUBREV_I32 : VOP2bInstModHelper <0x00000027, "V_SUBREV_I32",
> +  VReg_32, VSrc_32, i32, i32, null_frag, 0, "V_SUB_I32"
> +>;
>   
>   let Uses = [VCC] in { // Carry-in comes from VCC
> -defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32",
> -  [(set i32:$dst, (adde i32:$src0, i32:$src1))], VReg_32>;
> -defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32",
> -  [(set i32:$dst, (sube i32:$src0, i32:$src1))], VReg_32>;
> -defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], VReg_32,
> -                               "V_SUBB_U32">;
> +defm V_ADDC_U32 : VOP2bInstModHelper <0x00000028, "V_ADDC_U32",
> +  VReg_32, VReg_32, i32, i32, adde, 0
> +>;
> +defm V_SUBB_U32 : VOP2bInstModHelper <0x00000029, "V_SUBB_U32",
> +  VReg_32, VReg_32, i32, i32, sube, 0
> +>;
> +defm V_SUBBREV_U32 : VOP2bInstModHelper<0x0000002a, "V_SUBBREV_U32",
> +  VReg_32, VReg_32, i32, i32, null_frag, 0, "V_SUBB_U32"
> +>;
> +
>   } // End Uses = [VCC]
>   } // End isCommutable = 1, Defs = [VCC]
>   
> @@ -1384,8 +1389,8 @@ defm V_LDEXP_F32 : VOP2_32 <0x0000002b, "V_LDEXP_F32", []>;
>   ////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "V_CVT_PKACCUM_U8_F32", []>;
>   ////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "V_CVT_PKNORM_I16_F32", []>;
>   ////def V_CVT_PKNORM_U16_F32 : VOP2_U16 <0x0000002e, "V_CVT_PKNORM_U16_F32", []>;
> -defm V_CVT_PKRTZ_F16_F32 : VOP2_32 <0x0000002f, "V_CVT_PKRTZ_F16_F32",
> - [(set i32:$dst, (int_SI_packf16 f32:$src0, f32:$src1))]
> +defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <0x0000002f, "V_CVT_PKRTZ_F16_F32",
> + i32, f32, int_SI_packf16
>   >;
>   ////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "V_CVT_PK_U16_U32", []>;
>   ////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "V_CVT_PK_I16_I32", []>;
> @@ -1397,14 +1402,12 @@ defm V_CVT_PKRTZ_F16_F32 : VOP2_32 <0x0000002f, "V_CVT_PKRTZ_F16_F32",
>   let neverHasSideEffects = 1 in {
>   
>   defm V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
> -defm V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32",
> -  [(set f32:$dst, (fadd (fmul f32:$src0, f32:$src1), f32:$src2))]
> ->;
> -defm V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
> -  [(set i32:$dst, (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))]
> +defm V_MAD_F32 : VOP3Inst <0x00000141, "V_MAD_F32", f32, f32, fmad>;
> +defm V_MAD_I32_I24 : VOP3Inst <0x00000142, "V_MAD_I32_I24",
> +  i32, i32, AMDGPUmad_i24
>   >;
> -defm V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
> -  [(set i32:$dst, (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2))]
> +defm V_MAD_U32_U24 : VOP3Inst <0x00000143, "V_MAD_U32_U24",
> +  i32, i32, AMDGPUmad_u24
>   >;
>   
>   } // End neverHasSideEffects
> @@ -1415,24 +1418,21 @@ defm V_CUBETC_F32 : VOP3_32 <0x00000146, "V_CUBETC_F32", []>;
>   defm V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>;
>   
>   let neverHasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
> -defm V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32",
> -  [(set i32:$dst, (AMDGPUbfe_u32 i32:$src0, i32:$src1, i32:$src2))]>;
> -defm V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32",
> -  [(set i32:$dst, (AMDGPUbfe_i32 i32:$src0, i32:$src1, i32:$src2))]>;
> -}
> -
> -defm V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32",
> -  [(set i32:$dst, (AMDGPUbfi i32:$src0, i32:$src1, i32:$src2))]>;
> -defm V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32",
> -  [(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))]
> +defm V_BFE_U32 : VOP3Inst <0x00000148, "V_BFE_U32",
> +  i32, i32, AMDGPUbfe_u32
>   >;
> -def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64",
> -  [(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))]
> +defm V_BFE_I32 : VOP3Inst <0x00000149, "V_BFE_I32",
> +  i32, i32, AMDGPUbfe_i32
>   >;
> +}
> +
> +defm V_BFI_B32 : VOP3Inst <0x0000014a, "V_BFI_B32", i32, i32, AMDGPUbfi>;
> +defm V_FMA_F32 : VOP3Inst <0x0000014b, "V_FMA_F32", f32, f32, fma>;
> +defm V_FMA_F64 : VOP3Inst <0x0000014c, "V_FMA_F64", f64, f64, fma>;
>   //def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
> -defm V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
> +defm V_ALIGNBIT_B32 : VOP3Inst <0x0000014e, "V_ALIGNBIT_B32", i32, i32>;
>   
> -defm V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
> +defm V_ALIGNBYTE_B32 : VOP3Inst <0x0000014f, "V_ALIGNBYTE_B32", i32, i32>;
>   defm V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
>   ////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
>   ////def V_MIN3_I32 : VOP3_MIN3 <0x00000152, "V_MIN3_I32", []>;
> @@ -1446,61 +1446,55 @@ defm V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
>   //def V_SAD_U8 : VOP3_U8 <0x0000015a, "V_SAD_U8", []>;
>   //def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "V_SAD_HI_U8", []>;
>   //def V_SAD_U16 : VOP3_U16 <0x0000015c, "V_SAD_U16", []>;
> -defm V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
> +defm V_SAD_U32 : VOP3Inst <0x0000015d, "V_SAD_U32", i32, i32>;
>   ////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
> -defm V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32",
> -  [(set f32:$dst, (AMDGPUdiv_fixup f32:$src0, f32:$src1, f32:$src2))]
> +defm V_DIV_FIXUP_F32 : VOP3Inst <
> +  0x0000015f, "V_DIV_FIXUP_F32", f32, f32, AMDGPUdiv_fixup
>   >;
> -def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64",
> -  [(set f64:$dst, (AMDGPUdiv_fixup f64:$src0, f64:$src1, f64:$src2))]
> +defm V_DIV_FIXUP_F64 : VOP3Inst <
> +  0x00000160, "V_DIV_FIXUP_F64", f64, f64, AMDGPUdiv_fixup
>   >;
>   
> -def V_LSHL_B64 : VOP3_64_32 <0x00000161, "V_LSHL_B64",
> -  [(set i64:$dst, (shl i64:$src0, i32:$src1))]
> ->;
> -def V_LSHR_B64 : VOP3_64_32 <0x00000162, "V_LSHR_B64",
> -  [(set i64:$dst, (srl i64:$src0, i32:$src1))]
> ->;
> -def V_ASHR_I64 : VOP3_64_32 <0x00000163, "V_ASHR_I64",
> -  [(set i64:$dst, (sra i64:$src0, i32:$src1))]
> ->;
> +defm V_LSHL_B64 : VOP3_64_32 <0x00000161, "V_LSHL_B64", i64, i32, shl>;
> +defm V_LSHR_B64 : VOP3_64_32 <0x00000162, "V_LSHR_B64", i64, i32, srl>;
> +defm V_ASHR_I64 : VOP3_64_32 <0x00000163, "V_ASHR_I64", i64, i32, sra>;
>   
>   let isCommutable = 1 in {
>   
> -def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>;
> -def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>;
> -def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>;
> -def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
> +defm V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>;
> +defm V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>;
> +defm V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>;
> +defm V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
>   
>   } // isCommutable = 1
>   
> -def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
> +defm V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
>   
>   let isCommutable = 1 in {
>   
> -defm V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>;
> -defm V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
> -defm V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
> -defm V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
> +defm V_MUL_LO_U32 : VOP3Inst <0x00000169, "V_MUL_LO_U32", i32, i32>;
> +defm V_MUL_HI_U32 : VOP3Inst <0x0000016a, "V_MUL_HI_U32", i32, i32>;
> +defm V_MUL_LO_I32 : VOP3Inst <0x0000016b, "V_MUL_LO_I32", i32, i32>;
> +defm V_MUL_HI_I32 : VOP3Inst <0x0000016c, "V_MUL_HI_I32", i32, i32>;
>   
>   } // isCommutable = 1
>   
> -def V_DIV_SCALE_F32 : VOP3b_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
> +defm V_DIV_SCALE_F32 : VOP3b_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
>   
>   // Double precision division pre-scale.
> -def V_DIV_SCALE_F64 : VOP3b_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
> +defm V_DIV_SCALE_F64 : VOP3b_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
>   
>   defm V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32",
>     [(set f32:$dst, (AMDGPUdiv_fmas f32:$src0, f32:$src1, f32:$src2))]
>   >;
> -def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64",
> +defm V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64",
>     [(set f64:$dst, (AMDGPUdiv_fmas f64:$src0, f64:$src1, f64:$src2))]
>   >;
>   //def V_MSAD_U8 : VOP3_U8 <0x00000171, "V_MSAD_U8", []>;
>   //def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
>   //def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
> -def V_TRIG_PREOP_F64 : VOP3_64_32 <0x00000174, "V_TRIG_PREOP_F64",
> -  [(set f64:$dst, (AMDGPUtrig_preop f64:$src0, i32:$src1))]
> +defm V_TRIG_PREOP_F64 : VOP3_64_32 <
> +  0x00000174, "V_TRIG_PREOP_F64", f64, i32, AMDGPUtrig_preop
>   >;
>   
>   //===----------------------------------------------------------------------===//
> @@ -1722,7 +1716,9 @@ let Predicates = [isSI] in {
>   
>   def : Pat<
>     (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
> -  (V_CNDMASK_B32_e64 $src2, $src1, (V_CMP_GT_F32_e64 0, $src0))
> +  (V_CNDMASK_B32_e64 $src2, $src1,
> +                     (V_CMP_GT_F32_e64 SRCMODS.NONE, 0, SRCMODS.NONE, $src0,
> +                                       DSTCLAMP.NONE, DSTOMOD.NONE))
>   >;
>   
>   def : Pat <
> @@ -1869,7 +1865,7 @@ def : Pat <
>   
>   def : Pat <
>      (i32 (ctpop i32:$popcnt)),
> -   (V_BCNT_U32_B32_e64 $popcnt, 0, 0, 0)
> +   (V_BCNT_U32_B32_e64 $popcnt, 0)
>   >;
>   
>   def : Pat <
> @@ -1877,7 +1873,7 @@ def : Pat <
>     (INSERT_SUBREG
>       (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
>         (V_BCNT_U32_B32_e32 (EXTRACT_SUBREG $src, sub1),
> -        (V_BCNT_U32_B32_e64 (EXTRACT_SUBREG $src, sub0), 0, 0, 0)),
> +        (V_BCNT_U32_B32_e64 (EXTRACT_SUBREG $src, sub0), 0)),
>         sub0),
>       (V_MOV_B32_e32 0), sub1)
>   >;
> @@ -2430,7 +2426,7 @@ def : Pat <
>   def : Pat <
>     (int_SI_tid),
>     (V_MBCNT_HI_U32_B32_e32 0xffffffff,
> -                          (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0))
> +                          (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0))
>   >;
>   
>   //===----------------------------------------------------------------------===//
> @@ -2727,26 +2723,26 @@ let SubtargetPredicate = isCI in {
>   
>   // Sea Islands new arithmetic instructions
>   let neverHasSideEffects = 1 in {
> -defm V_TRUNC_F64 : VOP1_64 <0x00000017, "V_TRUNC_F64",
> -  [(set f64:$dst, (ftrunc f64:$src0))]
> +defm V_TRUNC_F64 : VOP1Inst <0x00000017, "V_TRUNC_F64",
> +  f64, f64, ftrunc
>   >;
> -defm V_CEIL_F64 : VOP1_64 <0x00000018, "V_CEIL_F64",
> -  [(set f64:$dst, (fceil f64:$src0))]
> +defm V_CEIL_F64 : VOP1Inst <0x00000018, "V_CEIL_F64",
> +  f64, f64, fceil
>   >;
> -defm V_FLOOR_F64 : VOP1_64 <0x0000001A, "V_FLOOR_F64",
> -  [(set f64:$dst, (ffloor f64:$src0))]
> +defm V_FLOOR_F64 : VOP1Inst <0x0000001A, "V_FLOOR_F64",
> +  f64, f64, ffloor
>   >;
> -defm V_RNDNE_F64 : VOP1_64 <0x00000019, "V_RNDNE_F64",
> -  [(set f64:$dst, (frint f64:$src0))]
> +defm V_RNDNE_F64 : VOP1Inst <0x00000019, "V_RNDNE_F64",
> +  f64, f64, frint
>   >;
>   
> -defm V_QSAD_PK_U16_U8 : VOP3_32 <0x00000173, "V_QSAD_PK_U16_U8", []>;
> -defm V_MQSAD_U16_U8 : VOP3_32 <0x000000172, "V_MQSAD_U16_U8", []>;
> -defm V_MQSAD_U32_U8 : VOP3_32 <0x00000175, "V_MQSAD_U32_U8", []>;
> -def V_MAD_U64_U32 : VOP3_64 <0x00000176, "V_MAD_U64_U32", []>;
> +defm V_QSAD_PK_U16_U8 : VOP3Inst <0x00000173, "V_QSAD_PK_U16_U8", i32, i32>;
> +defm V_MQSAD_U16_U8 : VOP3Inst <0x000000172, "V_MQSAD_U16_U8", i32, i32>;
> +defm V_MQSAD_U32_U8 : VOP3Inst <0x00000175, "V_MQSAD_U32_U8", i32, i32>;
> +defm V_MAD_U64_U32 : VOP3_64 <0x00000176, "V_MAD_U64_U32", []>;
>   
>   // XXX - Does this set VCC?
> -def V_MAD_I64_I32 : VOP3_64 <0x00000177, "V_MAD_I64_I32", []>;
> +defm V_MAD_I64_I32 : VOP3_64 <0x00000177, "V_MAD_I64_I32", []>;
>   } // End neverHasSideEffects = 1
>   
>   // Remaining instructions:
> diff --git a/lib/Target/R600/SILowerI1Copies.cpp b/lib/Target/R600/SILowerI1Copies.cpp
> index db19235..4ba87a5 100644
> --- a/lib/Target/R600/SILowerI1Copies.cpp
> +++ b/lib/Target/R600/SILowerI1Copies.cpp
> @@ -136,11 +136,7 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
>                    SrcRC == &AMDGPU::VReg_1RegClass) {
>           BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
>                   .addOperand(MI.getOperand(0))
> -                .addImm(0)
>                   .addOperand(MI.getOperand(1))
> -                .addImm(0)
> -                .addImm(0)
> -                .addImm(0)
>                   .addImm(0);
>           MI.eraseFromParent();
>         }
> diff --git a/lib/Target/R600/SIShrinkInstructions.cpp b/lib/Target/R600/SIShrinkInstructions.cpp
> index 8f1526d..67ee80c 100644
> --- a/lib/Target/R600/SIShrinkInstructions.cpp
> +++ b/lib/Target/R600/SIShrinkInstructions.cpp
> @@ -93,7 +93,7 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
>     const MachineOperand *Src1Mod =
>         TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
>   
> -  if (Src1 && (!isVGPR(Src1, TRI, MRI) || Src1Mod->getImm() != 0))
> +  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
>       return false;
>   
>     // We don't need to check src0, all input types are legal, so just make
> diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
> index b87ce22..fa1b608 100644
> --- a/test/CodeGen/R600/fabs.ll
> +++ b/test/CodeGen/R600/fabs.ll
> @@ -50,8 +50,9 @@ entry:
>   }
>   
>   ; SI-CHECK-LABEL: @fabs_fold
> +; SI-CHECK: S_LOAD_DWORD [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
>   ; SI-CHECK-NOT: V_AND_B32_e32
> -; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, |v{{[0-9]+}}|
> +; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
>   define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
>   entry:
>     %0 = call float @fabs(float %in0)
> diff --git a/test/CodeGen/R600/fneg.ll b/test/CodeGen/R600/fneg.ll
> index 4cddc73..5b47817 100644
> --- a/test/CodeGen/R600/fneg.ll
> +++ b/test/CodeGen/R600/fneg.ll
> @@ -61,8 +61,9 @@ entry:
>   }
>   
>   ; SI-CHECK-LABEL: @fneg_fold
> +; SI-CHECK: S_LOAD_DWORD [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
>   ; SI-CHECK-NOT: V_XOR_B32
> -; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
> +; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], v{{[0-9]+}}
>   define void @fneg_fold(float addrspace(1)* %out, float %in) {
>   entry:
>     %0 = fsub float -0.0, %in
> diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
> index 4f74efb..5fb9ff6 100644
> --- a/test/CodeGen/R600/fsub.ll
> +++ b/test/CodeGen/R600/fsub.ll
> @@ -20,8 +20,8 @@ declare void @llvm.AMDGPU.store.output(float, i32)
>   ; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
>   ; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
>   ; SI-CHECK: @fsub_v2f32
> -; SI-CHECK: V_SUB_F32
> -; SI-CHECK: V_SUB_F32
> +; SI-CHECK: V_SUBREV_F32
> +; SI-CHECK: V_SUBREV_F32
>   define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
>   entry:
>     %0 = fsub <2 x float> %a, %b
> @@ -35,10 +35,10 @@ entry:
>   ; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
>   ; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
>   ; SI-CHECK: @fsub_v4f32
> -; SI-CHECK: V_SUB_F32
> -; SI-CHECK: V_SUB_F32
> -; SI-CHECK: V_SUB_F32
> -; SI-CHECK: V_SUB_F32
> +; SI-CHECK: V_SUBREV_F32
> +; SI-CHECK: V_SUBREV_F32
> +; SI-CHECK: V_SUBREV_F32
> +; SI-CHECK: V_SUBREV_F32
>   define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
>     %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
>     %a = load <4 x float> addrspace(1) * %in
> diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
> index 419f275..72bbe0f 100644
> --- a/test/CodeGen/R600/mul_uint24.ll
> +++ b/test/CodeGen/R600/mul_uint24.ll
> @@ -23,7 +23,7 @@ entry:
>   ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
>   ; EG: 16
>   ; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> -; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16,
> +; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16
>   define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
>   entry:
>     %0 = mul i16 %a, %b
> @@ -37,7 +37,7 @@ entry:
>   ; The result must be sign-extended
>   ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
>   ; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> -; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8,
> +; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8
>   
>   define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
>   entry:
> diff --git a/test/CodeGen/R600/vop-shrink.ll b/test/CodeGen/R600/vop-shrink.ll
> index bb93ec4..eb9c304 100644
> --- a/test/CodeGen/R600/vop-shrink.ll
> +++ b/test/CodeGen/R600/vop-shrink.ll
> @@ -1,8 +1,6 @@
>   ; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
>   ; XXX: Enable this test once we are selecting 64-bit instructions
>   
> -; XFAIL: *
> -
This removes the XFAIL but doesn't remove the comment above it about re-enabling the test.

>   ; Test that we correctly commute a sub instruction
>   ; FUNC-LABEL: @sub_rev
>   ; SI-NOT: V_SUB_I32_e32 v{{[0-9]+}}, s
> -- 1.8.1.5

-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20140724/bc1c7b9b/attachment.html>


More information about the llvm-commits mailing list