[PATCH] R600/SI: operand folding patchset V2

Tom Stellard tom at stellard.net
Tue Feb 26 07:24:24 PST 2013


On Tue, Feb 26, 2013 at 04:04:02PM +0100, Christian König wrote:
> Hi Tom,
> 
> as promised attached is the V2 of the patchset.
> 
> Mostly the changes you suggested, except for patch 2, which now
> fixes 17 piglit tests and additionally fixes a minor but visible
> glitch in gears.
> 
> Let me know what you think about patch 2, and if it's OK I'm going
> to commit it this evening.
>

Patch 2 looks good to me, thanks.

-Tom
 
> Christian.

> From 5b6cf3ff1ed9604da261ecea2ba29acac27f2c5e Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Thu, 21 Feb 2013 16:55:35 +0100
> Subject: [PATCH 1/9] R600/SI: fix stupid typo
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> This is a candidate for the mesa-stable branch.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp |    2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> index 2bf8fb8..6cc0077 100644
> --- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> +++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
> @@ -131,7 +131,7 @@ uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO) const {
>    if (Imm.F == 4.0f)
>      return 246;
>  
> -  if (Imm.F == 4.0f)
> +  if (Imm.F == -4.0f)
>      return 247;
>  
>    return 255;
> -- 
> 1.7.10.4
> 
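A side note on patch 1: getLitEncoding() maps SI's inline floating-point
constants to single-byte hardware encodings, and the duplicated 4.0f check
meant -4.0f always fell through to 255, i.e. a full 32-bit literal dword.
A minimal standalone sketch of that mapping, per the SI ISA manual
(illustrative only, not the in-tree function):

  #include <cstdint>

  // Inline float constant encodings on SI; 255 means "no inline
  // encoding, emit a 32-bit literal instead".
  static uint32_t inlineFloatEncoding(float F) {
    if (F == 0.5f)  return 240;
    if (F == -0.5f) return 241;
    if (F == 1.0f)  return 242;
    if (F == -1.0f) return 243;
    if (F == 2.0f)  return 244;
    if (F == -2.0f) return 245;
    if (F == 4.0f)  return 246;
    if (F == -4.0f) return 247;  // the case patch 1 repairs
    return 255;
  }

  int main() { return inlineFloatEncoding(-4.0f) == 247 ? 0 : 1; }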

> From 379186ffb5ba3e849c1a91f842e10cbd70ed77c8 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Thu, 21 Feb 2013 09:20:13 +0100
> Subject: [PATCH 2/9] R600/SI: fix and cleanup SI register definition v2
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Prevent producing really strange TableGen code by using
> proper register sizes, alignments and hierarchy.
> 
> Also clean up the unused definitions and add some comments.
> 
> v2: add 512-bit SGPR registers, stop registers from wrapping around,
>     fix SGPR alignment
> 
> This is a candidate for the mesa-stable branch.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>  lib/Target/R600/SIInstructions.td |    5 +-
>  lib/Target/R600/SIRegisterInfo.td |  227 +++++++++++++++++++++----------------
>  2 files changed, 135 insertions(+), 97 deletions(-)
> 
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index 907cf49..9701d19 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -918,14 +918,15 @@ def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32", []>;
>  def S_CSELECT_B32 : SOP2 <
>    0x0000000a, (outs SReg_32:$dst),
>    (ins SReg_32:$src0, SReg_32:$src1, SCCReg:$scc), "S_CSELECT_B32",
> -  [(set (i32 SReg_32:$dst), (select SCCReg:$scc, SReg_32:$src0, SReg_32:$src1))]
> +  [(set (i32 SReg_32:$dst), (select (i1 SCCReg:$scc),
> +                                     SReg_32:$src0, SReg_32:$src1))]
>  >;
>  
>  def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>;
>  
>  // f32 pattern for S_CSELECT_B32
>  def : Pat <
> -  (f32 (select SCCReg:$scc, SReg_32:$src0, SReg_32:$src1)),
> +  (f32 (select (i1 SCCReg:$scc), SReg_32:$src0, SReg_32:$src1)),
>    (S_CSELECT_B32 SReg_32:$src0, SReg_32:$src1, SCCReg:$scc)
>  >;
>  
> diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
> index ab36b87..9e04e24 100644
> --- a/lib/Target/R600/SIRegisterInfo.td
> +++ b/lib/Target/R600/SIRegisterInfo.td
> @@ -1,30 +1,40 @@
> +//===-- SIRegisterInfo.td - SI Register defs ---------------*- tablegen -*-===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===----------------------------------------------------------------------===//
> +
> +//===----------------------------------------------------------------------===//
> +//  Declarations that describe the SI registers
> +//===----------------------------------------------------------------------===//
>  
>  class SIReg <string n, bits<16> encoding = 0> : Register<n> {
>    let Namespace = "AMDGPU";
>    let HWEncoding = encoding;
>  }
>  
> -class SI_64 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
> -  let Namespace = "AMDGPU";
> -  let SubRegIndices = [sub0, sub1];
> -  let HWEncoding = encoding;
> -}
> -
> -class SGPR_32 <bits<16> num, string name> : SIReg<name, num>;
> -
> -class VGPR_32 <bits<16> num, string name> : SIReg<name, num> {
> -  let HWEncoding{8} = 1;
> -}
> -
>  // Special Registers
>  def VCC : SIReg<"VCC", 106>;
> -def EXEC_LO : SIReg <"EXEC LO", 126>;
> -def EXEC_HI : SIReg <"EXEC HI", 127>;
> -def EXEC : SI_64<"EXEC", [EXEC_LO, EXEC_HI], 126>;
> +def EXEC : SIReg<"EXEC", 126>;
>  def SCC : SIReg<"SCC", 253>;
>  def M0 : SIReg <"M0", 124>;
>  
> -//Interpolation registers
> +// SGPR registers
> +foreach Index = 0-101 in {
> +  def SGPR#Index : SIReg <"SGPR"#Index, Index>;
> +}
> +
> +// VGPR registers
> +foreach Index = 0-255 in {
> +  def VGPR#Index : SIReg <"VGPR"#Index, Index> {
> +    let HWEncoding{8} = 1;
> +  }
> +}
> +
> +// Virtual interpolation registers
>  def PERSP_SAMPLE_I : SIReg <"PERSP_SAMPLE_I">;
>  def PERSP_SAMPLE_J : SIReg <"PERSP_SAMPLE_J">;
>  def PERSP_CENTER_I : SIReg <"PERSP_CENTER_I">;
> @@ -50,102 +60,150 @@ def ANCILLARY : SIReg <"ANCILLARY">;
>  def SAMPLE_COVERAGE : SIReg <"SAMPLE_COVERAGE">;
>  def POS_FIXED_PT : SIReg <"POS_FIXED_PT">;
>  
> -// SGPR 32-bit registers
> -foreach Index = 0-101 in {
> -  def SGPR#Index : SGPR_32 <Index, "SGPR"#Index>;
> -}
> +//===----------------------------------------------------------------------===//
> +//  Groupings using register classes and tuples
> +//===----------------------------------------------------------------------===//
>  
> +// SGPR 32-bit registers
>  def SGPR_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
>                              (add (sequence "SGPR%u", 0, 101))>;
>  
>  // SGPR 64-bit registers
>  def SGPR_64 : RegisterTuples<[sub0, sub1],
> -                             [(add (decimate SGPR_32, 2)),
> -                              (add(decimate (rotl SGPR_32, 1), 2))]>;
> +                             [(add (decimate (trunc SGPR_32, 101), 2)),
> +                              (add (decimate (shl SGPR_32, 1), 2))]>;
>  
>  // SGPR 128-bit registers
>  def SGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3],
> -                              [(add (decimate SGPR_32, 4)),
> -                               (add (decimate (rotl SGPR_32, 1), 4)),
> -                               (add (decimate (rotl SGPR_32, 2), 4)),
> -                               (add (decimate (rotl SGPR_32, 3), 4))]>;
> +                              [(add (decimate (trunc SGPR_32, 99), 4)),
> +                               (add (decimate (shl SGPR_32, 1), 4)),
> +                               (add (decimate (shl SGPR_32, 2), 4)),
> +                               (add (decimate (shl SGPR_32, 3), 4))]>;
>  
>  // SGPR 256-bit registers
>  def SGPR_256 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7],
> -                              [(add (decimate SGPR_32, 8)),
> -                               (add (decimate (rotl SGPR_32, 1), 8)),
> -                               (add (decimate (rotl SGPR_32, 2), 8)),
> -                               (add (decimate (rotl SGPR_32, 3), 8)),
> -                               (add (decimate (rotl SGPR_32, 4), 8)),
> -                               (add (decimate (rotl SGPR_32, 5), 8)),
> -                               (add (decimate (rotl SGPR_32, 6), 8)),
> -                               (add (decimate (rotl SGPR_32, 7), 8))]>;
> +                              [(add (decimate (trunc SGPR_32, 95), 4)),
> +                               (add (decimate (shl SGPR_32, 1), 4)),
> +                               (add (decimate (shl SGPR_32, 2), 4)),
> +                               (add (decimate (shl SGPR_32, 3), 4)),
> +                               (add (decimate (shl SGPR_32, 4), 4)),
> +                               (add (decimate (shl SGPR_32, 5), 4)),
> +                               (add (decimate (shl SGPR_32, 6), 4)),
> +                               (add (decimate (shl SGPR_32, 7), 4))]>;
> +
> +// SGPR 512-bit registers
> +def SGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
> +                               sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15],
> +                              [(add (decimate (trunc SGPR_32, 87), 4)),
> +                               (add (decimate (shl SGPR_32, 1), 4)),
> +                               (add (decimate (shl SGPR_32, 2), 4)),
> +                               (add (decimate (shl SGPR_32, 3), 4)),
> +                               (add (decimate (shl SGPR_32, 4), 4)),
> +                               (add (decimate (shl SGPR_32, 5), 4)),
> +                               (add (decimate (shl SGPR_32, 6), 4)),
> +                               (add (decimate (shl SGPR_32, 7), 4)),
> +                               (add (decimate (shl SGPR_32, 8), 4)),
> +                               (add (decimate (shl SGPR_32, 9), 4)),
> +                               (add (decimate (shl SGPR_32, 10), 4)),
> +                               (add (decimate (shl SGPR_32, 11), 4)),
> +                               (add (decimate (shl SGPR_32, 12), 4)),
> +                               (add (decimate (shl SGPR_32, 13), 4)),
> +                               (add (decimate (shl SGPR_32, 14), 4)),
> +                               (add (decimate (shl SGPR_32, 15), 4))]>;
>  
>  // VGPR 32-bit registers
> -foreach Index = 0-255 in {
> -  def VGPR#Index : VGPR_32 <Index, "VGPR"#Index>;
> -}
> -
>  def VGPR_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
>                              (add (sequence "VGPR%u", 0, 255))>;
>  
>  // VGPR 64-bit registers
>  def VGPR_64 : RegisterTuples<[sub0, sub1],
> -                             [(add VGPR_32),
> -                              (add (rotl VGPR_32, 1))]>;
> +                             [(add (trunc VGPR_32, 255)),
> +                              (add (shl VGPR_32, 1))]>;
>  
>  // VGPR 128-bit registers
>  def VGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3],
> -                              [(add VGPR_32),
> -                               (add (rotl VGPR_32, 1)),
> -                               (add (rotl VGPR_32, 2)),
> -                               (add (rotl VGPR_32, 3))]>;
> +                              [(add (trunc VGPR_32, 253)),
> +                               (add (shl VGPR_32, 1)),
> +                               (add (shl VGPR_32, 2)),
> +                               (add (shl VGPR_32, 3))]>;
>  
>  // VGPR 256-bit registers
>  def VGPR_256 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7],
> -                              [(add VGPR_32),
> -                               (add (rotl VGPR_32, 1)),
> -                               (add (rotl VGPR_32, 2)),
> -                               (add (rotl VGPR_32, 3)),
> -                               (add (rotl VGPR_32, 4)),
> -                               (add (rotl VGPR_32, 5)),
> -                               (add (rotl VGPR_32, 6)),
> -                               (add (rotl VGPR_32, 7))]>;
> +                              [(add (trunc VGPR_32, 249)),
> +                               (add (shl VGPR_32, 1)),
> +                               (add (shl VGPR_32, 2)),
> +                               (add (shl VGPR_32, 3)),
> +                               (add (shl VGPR_32, 4)),
> +                               (add (shl VGPR_32, 5)),
> +                               (add (shl VGPR_32, 6)),
> +                               (add (shl VGPR_32, 7))]>;
>  
>  // VGPR 512-bit registers
>  def VGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
>                                 sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15],
> -                              [(add VGPR_32),
> -                               (add (rotl VGPR_32, 1)),
> -                               (add (rotl VGPR_32, 2)),
> -                               (add (rotl VGPR_32, 3)),
> -                               (add (rotl VGPR_32, 4)),
> -                               (add (rotl VGPR_32, 5)),
> -                               (add (rotl VGPR_32, 6)),
> -                               (add (rotl VGPR_32, 7)),
> -                               (add (rotl VGPR_32, 8)),
> -                               (add (rotl VGPR_32, 9)),
> -                               (add (rotl VGPR_32, 10)),
> -                               (add (rotl VGPR_32, 11)),
> -                               (add (rotl VGPR_32, 12)),
> -                               (add (rotl VGPR_32, 13)),
> -                               (add (rotl VGPR_32, 14)),
> -                               (add (rotl VGPR_32, 15))]>;
> +                              [(add (trunc VGPR_32, 241)),
> +                               (add (shl VGPR_32, 1)),
> +                               (add (shl VGPR_32, 2)),
> +                               (add (shl VGPR_32, 3)),
> +                               (add (shl VGPR_32, 4)),
> +                               (add (shl VGPR_32, 5)),
> +                               (add (shl VGPR_32, 6)),
> +                               (add (shl VGPR_32, 7)),
> +                               (add (shl VGPR_32, 8)),
> +                               (add (shl VGPR_32, 9)),
> +                               (add (shl VGPR_32, 10)),
> +                               (add (shl VGPR_32, 11)),
> +                               (add (shl VGPR_32, 12)),
> +                               (add (shl VGPR_32, 13)),
> +                               (add (shl VGPR_32, 14)),
> +                               (add (shl VGPR_32, 15))]>;
> +
> +//===----------------------------------------------------------------------===//
> +//  Register classes used as source and destination
> +//===----------------------------------------------------------------------===//
> +
> +// Special register classes for predicates and the M0 register
> +def SCCReg : RegisterClass<"AMDGPU", [i32, i1], 32, (add SCC)>;
> +def VCCReg : RegisterClass<"AMDGPU", [i64, i1], 64, (add VCC)>;
> +def EXECReg : RegisterClass<"AMDGPU", [i64, i1], 64, (add EXEC)>;
> +def M0Reg : RegisterClass<"AMDGPU", [i32], 32, (add M0)>;
>  
>  // Register class for all scalar registers (SGPRs + Special Registers)
>  def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
> -    (add SGPR_32, M0, EXEC_LO, EXEC_HI)
> +  (add SGPR_32, M0Reg)
>  >;
>  
> -def SReg_64 : RegisterClass<"AMDGPU", [i1, i64], 64, (add SGPR_64, VCC, EXEC)>;
> +def SReg_64 : RegisterClass<"AMDGPU", [i64, i1], 64,
> +  (add SGPR_64, VCCReg, EXECReg)
> +>;
>  
>  def SReg_128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, (add SGPR_128)>;
>  
>  def SReg_256 : RegisterClass<"AMDGPU", [v8i32], 256, (add SGPR_256)>;
>  
> +def SReg_512 : RegisterClass<"AMDGPU", [v16i32], 512, (add SGPR_512)>;
> +
>  // Register class for all vector registers (VGPRs + Interpolation Registers)
> -def VReg_32 : RegisterClass<"AMDGPU", [f32, i32, v1i32], 32,
> -    (add VGPR_32,
> +def VReg_32 : RegisterClass<"AMDGPU", [f32, i32, v1i32], 32, (add VGPR_32)>;
> +
> +def VReg_64 : RegisterClass<"AMDGPU", [i64, v2i32], 64, (add VGPR_64)>;
> +
> +def VReg_128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, (add VGPR_128)>;
> +
> +def VReg_256 : RegisterClass<"AMDGPU", [v8i32], 256, (add VGPR_256)>;
> +
> +def VReg_512 : RegisterClass<"AMDGPU", [v16i32], 512, (add VGPR_512)>;
> +
> +//===----------------------------------------------------------------------===//
> +//  [SV]Src_* register classes, which can have either an immediate or a register
> +//===----------------------------------------------------------------------===//
> +
> +def SSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add SReg_32)>;
> +
> +def SSrc_64 : RegisterClass<"AMDGPU", [i64, i1], 64, (add SReg_64)>;
> +
> +def VSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
> +  (add VReg_32, SReg_32,
>      PERSP_SAMPLE_I, PERSP_SAMPLE_J,
>      PERSP_CENTER_I, PERSP_CENTER_J,
>      PERSP_CENTROID_I, PERSP_CENTROID_J,
> @@ -162,29 +220,8 @@ def VReg_32 : RegisterClass<"AMDGPU", [f32, i32, v1i32], 32,
>      ANCILLARY,
>      SAMPLE_COVERAGE,
>      POS_FIXED_PT
> -    )
> +  )
>  >;
>  
> -def VReg_64 : RegisterClass<"AMDGPU", [i64, v2i32], 64, (add VGPR_64)>;
> -
> -def VReg_128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128, (add VGPR_128)>;
> -
> -def VReg_256 : RegisterClass<"AMDGPU", [v8i32], 256, (add VGPR_256)>;
> -
> -def VReg_512 : RegisterClass<"AMDGPU", [v16i32], 512, (add VGPR_512)>;
> -
> -// [SV]Src_* operands can have either an immediate or an register
> -def SSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add SReg_32)>;
> -
> -def SSrc_64 : RegisterClass<"AMDGPU", [i1, i64], 64, (add SReg_64)>;
> -
> -def VSrc_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VReg_32, SReg_32)>;
> -
> -def VSrc_64 : RegisterClass<"AMDGPU", [i64], 64, (add SReg_64, VReg_64)>;
> -
> -// Special register classes for predicates and the M0 register
> -def SCCReg : RegisterClass<"AMDGPU", [i1], 1, (add SCC)>;
> -def VCCReg : RegisterClass<"AMDGPU", [i1], 1, (add VCC)>;
> -def EXECReg : RegisterClass<"AMDGPU", [i1], 1, (add EXEC)>;
> -def M0Reg : RegisterClass<"AMDGPU", [i32], 32, (add M0)>;
> +def VSrc_64 : RegisterClass<"AMDGPU", [i64], 64, (add VReg_64, SReg_64)>;
>  
> -- 
> 1.7.10.4
> 
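On the tuple definitions in patch 2: the old code built sub-register lists
with rotl, which rotates the whole sequence, so the final tuples wrapped
around (e.g. a 64-bit pair made of VGPR255 and VGPR0). Truncating the first
list and shifting the later ones keeps every tuple in range. A small
self-contained illustration of the difference, assuming 256 VGPRs and
2-wide tuples:

  #include <cstdio>

  // Only the indexing scheme is taken from the patch; the code itself
  // is a standalone example.
  int main() {
    const int N = 256;  // number of VGPRs
    // Old: sub0[i] = i, sub1[i] = rotl(list, 1)[i] = (i + 1) % N, so
    // the last pair is (255, 0) -- not a real register pair.
    std::printf("rotl last pair:      (%d, %d)\n", N - 1, (N - 1 + 1) % N);
    // New: sub0 ranges over trunc(list, N - 1) = 0..254 and sub1 over
    // shl(list, 1) = 1..255, so the last pair is (254, 255).
    std::printf("trunc/shl last pair: (%d, %d)\n", N - 2, N - 1);
    return 0;
  }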

> From 92b23cc493d379727cc4024766de9bc06eda3643 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Sat, 23 Feb 2013 20:28:07 +0100
> Subject: [PATCH 3/9] R600/SI: fix VOP3b encoding v2
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> v2: document why we hardcode VCC for now.
> 
> This is a candidate for the mesa-stable branch.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/SIInstrInfo.td    |   24 ++++++++++++++++++++++++
>  lib/Target/R600/SIInstructions.td |   14 ++++++++------
>  2 files changed, 32 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index 99168ce..d68fbff 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -51,6 +51,7 @@ class InlineImm <ValueType vt> : ImmLeaf <vt, [{
>  
>  def SIOperand {
>    int ZERO = 0x80;
> +  int VCC = 0x6A;
>  }
>  
>  class GPR4Align <RegisterClass rc> : Operand <vAny> {
> @@ -195,6 +196,29 @@ multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern>
>  multiclass VOP2_64 <bits<6> op, string opName, list<dag> pattern>
>    : VOP2_Helper <op, VReg_64, VSrc_64, opName, pattern>;
>  
> +multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> {
> +
> +  def _e32 : VOP2 <
> +    op, (outs VReg_32:$dst), (ins VSrc_32:$src0, VReg_32:$src1),
> +    opName#"_e32 $dst, $src0, $src1", pattern
> +  >;
> +
> +  def _e64 : VOP3b <
> +    {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> +    (outs VReg_32:$dst),
> +    (ins VSrc_32:$src0, VReg_32:$src1,
> +         i32imm:$abs, i32imm:$clamp,
> +         i32imm:$omod, i32imm:$neg),
> +    opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> +  > {
> +    let SRC2 = SIOperand.ZERO;
> +    /* The VOP2 variant puts the carry-out into VCC; the VOP3 variant
> +       can write it into any SGPR. We currently don't use the carry-out,
> +       so for now hardcode it to VCC as well. */
> +    let SDST = SIOperand.VCC;
> +  }
> +}
> +
>  multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
>                          string opName, ValueType vt, PatLeaf cond> {
>  
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index 9701d19..f999025 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -805,17 +805,19 @@ defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
>  //defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
>  //defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
>  let Defs = [VCC] in { // Carry-out goes to VCC
> -defm V_ADD_I32 : VOP2_32 <0x00000025, "V_ADD_I32",
> +defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
>    [(set VReg_32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
>  >;
> -defm V_SUB_I32 : VOP2_32 <0x00000026, "V_SUB_I32",
> +defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
>    [(set VReg_32:$dst, (sub (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
>  >;
> +defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", []>;
> +let Uses = [VCC] in { // Carry-in comes from VCC
> +defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", []>;
> +defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", []>;
> +defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", []>;
> +} // End Uses = [VCC]
>  } // End Defs = [VCC]
> -defm V_SUBREV_I32 : VOP2_32 <0x00000027, "V_SUBREV_I32", []>;
> -defm V_ADDC_U32 : VOP2_32 <0x00000028, "V_ADDC_U32", []>;
> -defm V_SUBB_U32 : VOP2_32 <0x00000029, "V_SUBB_U32", []>;
> -defm V_SUBBREV_U32 : VOP2_32 <0x0000002a, "V_SUBBREV_U32", []>;
>  defm V_LDEXP_F32 : VOP2_32 <0x0000002b, "V_LDEXP_F32", []>;
>  ////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "V_CVT_PKACCUM_U8_F32", []>;
>  ////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "V_CVT_PKNORM_I16_F32", []>;
> -- 
> 1.7.10.4
> 
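One cross-check on patch 3: the hardcoded SDST value, SIOperand.VCC = 0x6A,
is simply VCC's hardware encoding from patch 2's register definitions
(def VCC : SIReg<"VCC", 106>), so the e64 form writes its carry-out to the
same register the e32 form implies. A trivial sketch of that equivalence:

  #include <cassert>

  int main() {
    const unsigned SIOperand_VCC = 0x6A;  // SIInstrInfo.td (patch 3)
    const unsigned VCC_Encoding  = 106;   // SIRegisterInfo.td (patch 2)
    assert(SIOperand_VCC == VCC_Encoding);
    return 0;
  }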

> From 00323a27ec8942166c96ee02a946537c0714ce4b Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Sun, 17 Feb 2013 12:26:04 +0100
> Subject: [PATCH 4/9] R600/SI: add folding helper
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/AMDGPUISelLowering.h  |    4 ++++
>  lib/Target/R600/AMDILISelDAGToDAG.cpp |   19 +++++++++++++++++++
>  lib/Target/R600/SIISelLowering.cpp    |    6 ++++++
>  lib/Target/R600/SIISelLowering.h      |    1 +
>  4 files changed, 30 insertions(+)
> 
> diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
> index 404f620..9e7d997 100644
> --- a/lib/Target/R600/AMDGPUISelLowering.h
> +++ b/lib/Target/R600/AMDGPUISelLowering.h
> @@ -65,6 +65,10 @@ public:
>    SDValue LowerMinMax(SDValue Op, SelectionDAG &DAG) const;
>    virtual const char* getTargetNodeName(unsigned Opcode) const;
>  
> +  virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const {
> +    return N;
> +  }
> +
>  // Functions defined in AMDILISelLowering.cpp
>  public:
>  
> diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> index 2e726e9..2f70fa9 100644
> --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
> +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> @@ -43,6 +43,7 @@ public:
>  
>    SDNode *Select(SDNode *N);
>    virtual const char *getPassName() const;
> +  virtual void PostprocessISelDAG();
>  
>  private:
>    inline SDValue getSmallIPtrImm(unsigned Imm);
> @@ -575,3 +576,21 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
>  
>    return true;
>  }
> +
> +void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
> +
> +  // Go over all selected nodes and try to fold them a bit more
> +  const AMDGPUTargetLowering& Lowering = ((const AMDGPUTargetLowering&)TLI);
> +  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
> +       E = CurDAG->allnodes_end(); I != E; ++I) {
> +
> +    MachineSDNode *Node = dyn_cast<MachineSDNode>(I);
> +    if (!Node)
> +      continue;
> +
> +    SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG);
> +    if (ResNode != Node)
> +      ReplaceUses(Node, ResNode);
> +  }
> +}
> +
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index 212e3f2..13173e8 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -357,3 +357,9 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
>    }
>    return SDValue();
>  }
> +
> +SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
> +                                          SelectionDAG &DAG) const {
> +  // TODO: Implement immediate folding
> +  return Node;
> +}
> diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
> index 5d048f8..71f99ac 100644
> --- a/lib/Target/R600/SIISelLowering.h
> +++ b/lib/Target/R600/SIISelLowering.h
> @@ -41,6 +41,7 @@ public:
>    virtual EVT getSetCCResultType(EVT VT) const;
>    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
>    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
> +  virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
>  };
>  
>  } // End namespace llvm
> -- 
> 1.7.10.4
> 
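The structure patch 4 sets up is a small template-method hook: the shared
DAG-to-DAG pass walks every selected machine node once and gives the target
lowering a chance to rewrite it, with the base class defaulting to the
identity. A minimal sketch of the same pattern, with hypothetical names
(this is not the LLVM API):

  #include <vector>

  struct Node { int Opcode; };

  struct TargetLoweringBase {
    // Default hook: fold nothing, return the node unchanged (like the
    // AMDGPUTargetLowering::PostISelFolding default in the patch).
    virtual Node *postISelFolding(Node *N) const { return N; }
    virtual ~TargetLoweringBase() {}
  };

  // Shared driver, analogous to PostprocessISelDAG() in the patch.
  void postprocess(std::vector<Node *> &Nodes,
                   const TargetLoweringBase &Lowering) {
    for (Node *N : Nodes) {
      Node *Res = Lowering.postISelFolding(N);
      if (Res != N) {
        // replace all uses of N with Res (ReplaceUses in the patch)
      }
    }
  }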

> From b7a252df07632c3da7f8f81d36e9227cfd7042d6 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Fri, 22 Feb 2013 16:12:22 +0100
> Subject: [PATCH 5/9] R600/SI: add post ISel folding for SI v2
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Include immediate folding and SGPR limit handling for VOP3 instructions.
> 
> v2: remove leftover hasExtraSrcRegAllocReq
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/AMDILISelDAGToDAG.cpp |    1 +
>  lib/Target/R600/SIISelLowering.cpp    |  205 ++++++++++++++++++++++++++++++++-
>  lib/Target/R600/SIISelLowering.h      |    9 ++
>  lib/Target/R600/SIInstrInfo.td        |   17 ++-
>  lib/Target/R600/SIInstructions.td     |   22 ++--
>  5 files changed, 231 insertions(+), 23 deletions(-)
> 
> diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> index 2f70fa9..e77b9dc 100644
> --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
> +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> @@ -16,6 +16,7 @@
>  #include "AMDGPURegisterInfo.h"
>  #include "AMDILDevices.h"
>  #include "R600InstrInfo.h"
> +#include "SIISelLowering.h"
>  #include "llvm/ADT/ValueMap.h"
>  #include "llvm/CodeGen/PseudoSourceValue.h"
>  #include "llvm/CodeGen/SelectionDAGISel.h"
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index 13173e8..bf1f3bf 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -26,7 +26,8 @@ using namespace llvm;
>  
>  SITargetLowering::SITargetLowering(TargetMachine &TM) :
>      AMDGPUTargetLowering(TM),
> -    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())) {
> +    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())),
> +    TRI(TM.getRegisterInfo()) {
>    addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
>    addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
>    addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
> @@ -358,8 +359,206 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
>    return SDValue();
>  }
>  
> +/// \brief Test if RegClass is one of the VSrc classes 
> +static bool isVSrc(unsigned RegClass) {
> +  return AMDGPU::VSrc_32RegClassID == RegClass ||
> +         AMDGPU::VSrc_64RegClassID == RegClass;
> +}
> +
> +/// \brief Test if RegClass is one of the SSrc classes 
> +static bool isSSrc(unsigned RegClass) {
> +  return AMDGPU::SSrc_32RegClassID == RegClass ||
> +         AMDGPU::SSrc_64RegClassID == RegClass;
> +}
> +
> +/// \brief Analyze the possible immediate value N
> +///
> +/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
> +/// and the immediate value if it's a literal immediate
> +int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
> +
> +  union {
> +    int32_t I;
> +    float F;
> +  } Imm;
> +
> +  if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N))
> +    Imm.I = Node->getSExtValue();
> +  else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N))
> +    Imm.F = Node->getValueAPF().convertToFloat();
> +  else
> +    return -1; // It isn't an immediate
> +
> +  if ((Imm.I >= -16 && Imm.I <= 64) ||
> +      Imm.F == 0.5f || Imm.F == -0.5f ||
> +      Imm.F == 1.0f || Imm.F == -1.0f ||
> +      Imm.F == 2.0f || Imm.F == -2.0f ||
> +      Imm.F == 4.0f || Imm.F == -4.0f)
> +    return 0; // It's an inline immediate
> +
> +  return Imm.I; // It's a literal immediate
> +}
> +
> +/// \brief Try to fold an immediate directly into an instruction
> +bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
> +                               bool &ScalarSlotUsed) const {
> +
> +  MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
> +  if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
> +    return false;
> +
> +  const SDValue &Op = Mov->getOperand(0);
> +  int32_t Value = analyzeImmediate(Op.getNode());
> +  if (Value == -1) {
> +    // Not an immediate at all
> +    return false;
> +
> +  } else if (Value == 0) {
> +    // Inline immediates can always be folded
> +    Operand = Op;
> +    return true;
> +
> +  } else if (Value == Immediate) {
> +    // Matches the already-folded literal immediate
> +    Operand = Op;
> +    return true;
> +
> +  } else if (!ScalarSlotUsed && !Immediate) {
> +    // Fold this literal immediate
> +    ScalarSlotUsed = true;
> +    Immediate = Value;
> +    Operand = Op;
> +    return true;
> +
> +  }
> +
> +  return false;
> +}
> +
> +/// \brief Does "Op" fit into register class "RegClass"?
> +bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op,
> +                                    unsigned RegClass) const {
> +
> +  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 
> +  SDNode *Node = Op.getNode();
> +
> +  int OpClass;
> +  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) {
> +    const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode());
> +    OpClass = Desc.OpInfo[Op.getResNo()].RegClass;
> +
> +  } else if (Node->getOpcode() == ISD::CopyFromReg) {
> +    RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode());
> +    OpClass = MRI.getRegClass(Reg->getReg())->getID();
> +
> +  } else
> +    return false;
> +
> +  if (OpClass == -1)
> +    return false;
> +
> +  return TRI->getRegClass(RegClass)->hasSubClassEq(TRI->getRegClass(OpClass));
> +}
> +
> +/// \brief Make sure that we don't exceed the number of allowed scalars
> +void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
> +                                       unsigned RegClass,
> +                                       bool &ScalarSlotUsed) const {
> +
> +  // First map the operand's register class to a destination class
> +  if (RegClass == AMDGPU::VSrc_32RegClassID)
> +    RegClass = AMDGPU::VReg_32RegClassID;
> +  else if (RegClass == AMDGPU::VSrc_64RegClassID)
> +    RegClass = AMDGPU::VReg_64RegClassID;
> +  else
> +    return;
> +
> +  // Nothing to do if they fit naturally
> +  if (fitsRegClass(DAG, Operand, RegClass))
> +    return;
> +
> +  // If the scalar slot isn't used yet, use it now
> +  if (!ScalarSlotUsed) {
> +    ScalarSlotUsed = true;
> +    return;
> +  }
> +
> +  // This is a conservative approach; it is possible that we can't determine
> +  // the correct register class and copy too often, but better safe than sorry.
> +  SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
> +  SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(),
> +                                    Operand.getValueType(), Operand, RC);
> +  Operand = SDValue(Node, 0);
> +}
> +
>  SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>                                            SelectionDAG &DAG) const {
> -  // TODO: Implement immediate folding
> -  return Node;
> +
> +  // Original encoding (either e32 or e64)
> +  int Opcode = Node->getMachineOpcode();
> +  const MCInstrDesc *Desc = &TII->get(Opcode);
> +
> +  unsigned NumDefs = Desc->getNumDefs();
> +  unsigned NumOps = Desc->getNumOperands();
> +
> +  int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
> +  bool HaveVSrc = false, HaveSSrc = false;
> +
> +  // First figure out what we already have in this instruction
> +  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
> +       i != e && Op < NumOps; ++i, ++Op) {
> +
> +    unsigned RegClass = Desc->OpInfo[Op].RegClass;
> +    if (isVSrc(RegClass))
> +      HaveVSrc = true;
> +    else if (isSSrc(RegClass))
> +      HaveSSrc = true;
> +    else
> +      continue;
> +
> +    int32_t Imm = analyzeImmediate(Node->getOperand(i).getNode());
> +    if (Imm != -1 && Imm != 0) {
> +      // Literal immediate
> +      Immediate = Imm;
> +    }
> +  }
> +
> +  // If we have neither VSrc nor SSrc, it makes no sense to continue
> +  if (!HaveVSrc && !HaveSSrc)
> +    return Node;
> +
> +  // No scalar allowed when we have both VSrc and SSrc
> +  bool ScalarSlotUsed = HaveVSrc && HaveSSrc;
> +
> +  // Second, go over the operands and try to fold them
> +  std::vector<SDValue> Ops;
> +  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
> +       i != e && Op < NumOps; ++i, ++Op) {
> +
> +    const SDValue &Operand = Node->getOperand(i);
> +    Ops.push_back(Operand);
> +
> +    // Already folded immediate?
> +    if (isa<ConstantSDNode>(Operand.getNode()) ||
> +        isa<ConstantFPSDNode>(Operand.getNode()))
> +      continue;
> +
> +    // Is this a VSrc or SSrc operand?
> +    unsigned RegClass = Desc->OpInfo[Op].RegClass;
> +    if (!isVSrc(RegClass) && !isSSrc(RegClass))
> +      continue;
> +
> +    // Try to fold the immediates
> +    if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
> +      // Folding didn't work; make sure we don't hit the SReg limit
> +      ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed);
> +    }
> +  }
> +
> +  // Add optional chain and glue
> +  for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
> +    Ops.push_back(Node->getOperand(i));
> +
> +  // Update the instruction parameters
> +  return DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
>  }
> diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
> index 71f99ac..737162f 100644
> --- a/lib/Target/R600/SIISelLowering.h
> +++ b/lib/Target/R600/SIISelLowering.h
> @@ -22,6 +22,7 @@ namespace llvm {
>  
>  class SITargetLowering : public AMDGPUTargetLowering {
>    const SIInstrInfo * TII;
> +  const TargetRegisterInfo * TRI;
>  
>    void LowerMOV_IMM(MachineInstr *MI, MachineBasicBlock &BB,
>                MachineBasicBlock::iterator I, unsigned Opocde) const;
> @@ -34,6 +35,12 @@ class SITargetLowering : public AMDGPUTargetLowering {
>    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
>    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
>  
> +  bool foldImm(SDValue &Operand, int32_t &Immediate,
> +               bool &ScalarSlotUsed) const;
> +  bool fitsRegClass(SelectionDAG &DAG, SDValue &Op, unsigned RegClass) const;
> +  void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, 
> +                       unsigned RegClass, bool &ScalarSlotUsed) const;
> +
>  public:
>    SITargetLowering(TargetMachine &tm);
>    virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
> @@ -42,6 +49,8 @@ public:
>    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
>    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
>    virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
> +
> +  int32_t analyzeImmediate(const SDNode *N) const;
>  };
>  
>  } // End namespace llvm
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index d68fbff..3a617b4 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -40,11 +40,10 @@ def IMM12bit : ImmLeaf <
>    [{return isUInt<12>(Imm);}]
>  >;
>  
> -class InlineImm <ValueType vt> : ImmLeaf <vt, [{
> -  return -16 <= Imm && Imm <= 64;
> +class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
> +  return ((const SITargetLowering &)TLI).analyzeImmediate(N) == 0;
>  }]>;
>  
> -
>  //===----------------------------------------------------------------------===//
>  // SI assembler operands
>  //===----------------------------------------------------------------------===//
> @@ -181,7 +180,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
>    def _e64 : VOP3 <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs vrc:$dst),
> -    (ins arc:$src0, vrc:$src1,
> +    (ins arc:$src0, arc:$src1,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> @@ -206,7 +205,7 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> {
>    def _e64 : VOP3b <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs VReg_32:$dst),
> -    (ins VSrc_32:$src0, VReg_32:$src1,
> +    (ins VSrc_32:$src0, VSrc_32:$src1,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> @@ -230,12 +229,12 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
>    def _e64 : VOP3 <
>      {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs SReg_64:$dst),
> -    (ins arc:$src0, vrc:$src1,
> +    (ins arc:$src0, arc:$src1,
>           InstFlag:$abs, InstFlag:$clamp,
>           InstFlag:$omod, InstFlag:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg",
>      !if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
> -      [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), vrc:$src1, cond)))]
> +      [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
>      )
>    > {
>      let SRC2 = SIOperand.ZERO;
> @@ -252,14 +251,14 @@ multiclass VOPC_64 <bits<8> op, string opName,
>  
>  class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    op, (outs VReg_32:$dst),
> -  (ins VSrc_32:$src0, VReg_32:$src1, VReg_32:$src2,
> +  (ins VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>  >;
>  
>  class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    op, (outs VReg_64:$dst),
> -  (ins VSrc_64:$src0, VReg_64:$src1, VReg_64:$src2,
> +  (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>  >;
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index f999025..822be18 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -732,17 +732,17 @@ def V_CNDMASK_B32_e32 : VOP2 <0x00000000, (outs VReg_32:$dst),
>  }
>  
>  def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst),
> -  (ins VReg_32:$src0, VReg_32:$src1, SReg_64:$src2,
> +  (ins VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2,
>     InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
>    "V_CNDMASK_B32_e64 $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg",
> -  [(set (i32 VReg_32:$dst), (select (i1 SReg_64:$src2),
> -   VReg_32:$src1, VReg_32:$src0))]
> +  [(set (i32 VReg_32:$dst), (select (i1 SSrc_64:$src2),
> +   VSrc_32:$src1, VSrc_32:$src0))]
>  >;
>  
>  //f32 pattern for V_CNDMASK_B32_e64
>  def : Pat <
> -  (f32 (select (i1 SReg_64:$src2), VReg_32:$src1, VReg_32:$src0)),
> -  (V_CNDMASK_B32_e64 VReg_32:$src0, VReg_32:$src1, SReg_64:$src2)
> +  (f32 (select (i1 SSrc_64:$src2), VSrc_32:$src1, VSrc_32:$src0)),
> +  (V_CNDMASK_B32_e64 VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2)
>  >;
>  
>  defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
> @@ -895,7 +895,7 @@ def V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
>  def V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
>  def : Pat <
>    (mul VSrc_32:$src0, VReg_32:$src1),
> -  (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 SIOperand.ZERO), 0, 0, 0, 0)
> +  (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 0), 0, 0, 0, 0)
>  >;
>  def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
>  def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
> @@ -1219,19 +1219,19 @@ def : BitConvert <f32, i32, VReg_32>;
>  
>  def : Pat <
>    (int_AMDIL_clamp VReg_32:$src, (f32 FP_ZERO), (f32 FP_ONE)),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     0 /* ABS */, 1 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */)
>  >;
>  
>  def : Pat <
>    (fabs VReg_32:$src),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     1 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */)
>  >;
>  
>  def : Pat <
>    (fneg VReg_32:$src),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     0 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 1 /* NEG */)
>  >;
>  
> @@ -1394,8 +1394,8 @@ def : Pat <
>  /**********   VOP3 Patterns    **********/
>  /********** ================== **********/
>  
> -def : Pat <(f32 (fadd (fmul VSrc_32:$src0, VReg_32:$src1), VReg_32:$src2)),
> -           (V_MAD_F32 VSrc_32:$src0, VReg_32:$src1, VReg_32:$src2,
> +def : Pat <(f32 (fadd (fmul VSrc_32:$src0, VSrc_32:$src1), VSrc_32:$src2)),
> +           (V_MAD_F32 VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
>              0, 0, 0, 0)>;
>  
>  /********** ================== **********/
> -- 
> 1.7.10.4
> 
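On patch 5's classification scheme: analyzeImmediate() packs its answer
into one int32_t -- -1 for "not an immediate", 0 for "inline immediate",
and the value itself for a literal. The sentinels are safe because 0 and -1
both lie inside the inline range -16..64, so a genuine literal can never
collide with them. A standalone restatement of the integer half of the rule:

  #include <cassert>
  #include <cstdint>

  // The float cases (+-0.5, +-1.0, +-2.0, +-4.0) are analogous.
  static int32_t classify(int32_t I) {
    if (I >= -16 && I <= 64)
      return 0;  // inline immediate, free to fold anywhere
    return I;    // literal immediate, at most one per instruction
  }

  int main() {
    assert(classify(0) == 0);        // inline, never mistaken for a literal
    assert(classify(-1) == 0);       // inline, never mistaken for "no imm"
    assert(classify(1234) == 1234);  // needs the single literal slot
    return 0;
  }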

> From 1a0dfba3a708220d61cb82e1e83b6f9d2ef29924 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Mon, 25 Feb 2013 11:19:15 +0100
> Subject: [PATCH 6/9] R600/SI: add some more instruction flags
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/SIInstrInfo.cpp   |   10 ++++
>  lib/Target/R600/SIInstrInfo.h     |    3 ++
>  lib/Target/R600/SIInstructions.td |   93 +++++++++++++++++++++++++++++++------
>  3 files changed, 92 insertions(+), 14 deletions(-)
> 
> diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
> index 4dfd26e..d9dbd6a 100644
> --- a/lib/Target/R600/SIInstrInfo.cpp
> +++ b/lib/Target/R600/SIInstrInfo.cpp
> @@ -66,6 +66,16 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
>    }
>  }
>  
> +MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
> +                                              bool NewMI) const {
> +
> +  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
> +      !MI->getOperand(2).isReg())
> +    return 0;
> +
> +  return TargetInstrInfo::commuteInstruction(MI, NewMI);
> +}
> +
>  MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
>                                             int64_t Imm) const {
>    MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index a65f7b6..015cfb3 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -35,6 +35,9 @@ public:
>                             unsigned DestReg, unsigned SrcReg,
>                             bool KillSrc) const;
>  
> +  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
> +                                           bool NewMI=false) const;
> +
>    virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
>                                          int64_t Imm) const;
>  
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index 822be18..af116f0 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -28,10 +28,14 @@ def isSI : Predicate<"Subtarget.device()"
>  let Predicates = [isSI] in {
>  
>  let neverHasSideEffects = 1 in {
> +
> +let isMoveImm = 1 in {
>  def S_MOV_B32 : SOP1_32 <0x00000003, "S_MOV_B32", []>;
>  def S_MOV_B64 : SOP1_64 <0x00000004, "S_MOV_B64", []>;
>  def S_CMOV_B32 : SOP1_32 <0x00000005, "S_CMOV_B32", []>;
>  def S_CMOV_B64 : SOP1_64 <0x00000006, "S_CMOV_B64", []>;
> +} // End isMoveImm = 1
> +
>  def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32", []>;
>  def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64", []>;
>  def S_WQM_B32 : SOP1_32 <0x00000009, "S_WQM_B32", []>;
> @@ -39,6 +43,7 @@ def S_WQM_B64 : SOP1_64 <0x0000000a, "S_WQM_B64", []>;
>  def S_BREV_B32 : SOP1_32 <0x0000000b, "S_BREV_B32", []>;
>  def S_BREV_B64 : SOP1_64 <0x0000000c, "S_BREV_B64", []>;
>  } // End neverHasSideEffects = 1
> +
>  ////def S_BCNT0_I32_B32 : SOP1_BCNT0 <0x0000000d, "S_BCNT0_I32_B32", []>;
>  ////def S_BCNT0_I32_B64 : SOP1_BCNT0 <0x0000000e, "S_BCNT0_I32_B64", []>;
>  ////def S_BCNT1_I32_B32 : SOP1_BCNT1 <0x0000000f, "S_BCNT1_I32_B32", []>;
> @@ -107,6 +112,7 @@ def S_CMPK_EQ_I32 : SOPK <
>  >;
>  */
>  
> +let isCompare = 1 in {
>  def S_CMPK_LG_I32 : SOPK_32 <0x00000004, "S_CMPK_LG_I32", []>;
>  def S_CMPK_GT_I32 : SOPK_32 <0x00000005, "S_CMPK_GT_I32", []>;
>  def S_CMPK_GE_I32 : SOPK_32 <0x00000006, "S_CMPK_GE_I32", []>;
> @@ -118,6 +124,8 @@ def S_CMPK_GT_U32 : SOPK_32 <0x0000000b, "S_CMPK_GT_U32", []>;
>  def S_CMPK_GE_U32 : SOPK_32 <0x0000000c, "S_CMPK_GE_U32", []>;
>  def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>;
>  def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>;
> +} // End isCompare = 1
> +
>  def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
>  def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
>  //def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "S_CBRANCH_I_FORK", []>;
> @@ -127,6 +135,8 @@ def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>;
>  //def S_SETREG_IMM32_B32 : SOPK_32 <0x00000015, "S_SETREG_IMM32_B32", []>;
>  //def EXP : EXP_ <0x00000000, "EXP", []>;
>  
> +let isCompare = 1 in {
> +
>  defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32">;
>  defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_LT>;
>  defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_EQ>;
> @@ -144,8 +154,7 @@ defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_NE>;
>  defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32">;
>  defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32">;
>  
> -//Side effect is writing to EXEC
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  
>  defm V_CMPX_F_F32 : VOPC_32 <0x00000010, "V_CMPX_F_F32">;
>  defm V_CMPX_LT_F32 : VOPC_32 <0x00000011, "V_CMPX_LT_F32">;
> @@ -164,7 +173,7 @@ defm V_CMPX_NEQ_F32 : VOPC_32 <0x0000001d, "V_CMPX_NEQ_F32">;
>  defm V_CMPX_NLT_F32 : VOPC_32 <0x0000001e, "V_CMPX_NLT_F32">;
>  defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">;
>  
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">;
>  defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64">;
> @@ -183,8 +192,7 @@ defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64">;
>  defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">;
>  defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">;
>  
> -//Side effect is writing to EXEC
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  
>  defm V_CMPX_F_F64 : VOPC_64 <0x00000030, "V_CMPX_F_F64">;
>  defm V_CMPX_LT_F64 : VOPC_64 <0x00000031, "V_CMPX_LT_F64">;
> @@ -203,7 +211,7 @@ defm V_CMPX_NEQ_F64 : VOPC_64 <0x0000003d, "V_CMPX_NEQ_F64">;
>  defm V_CMPX_NLT_F64 : VOPC_64 <0x0000003e, "V_CMPX_NLT_F64">;
>  defm V_CMPX_TRU_F64 : VOPC_64 <0x0000003f, "V_CMPX_TRU_F64">;
>  
> -} // End hasSideEffects = 1
> +} // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMPS_F_F32 : VOPC_32 <0x00000040, "V_CMPS_F_F32">;
>  defm V_CMPS_LT_F32 : VOPC_32 <0x00000041, "V_CMPS_LT_F32">;
> @@ -221,6 +229,9 @@ defm V_CMPS_NLE_F32 : VOPC_32 <0x0000004c, "V_CMPS_NLE_F32">;
>  defm V_CMPS_NEQ_F32 : VOPC_32 <0x0000004d, "V_CMPS_NEQ_F32">;
>  defm V_CMPS_NLT_F32 : VOPC_32 <0x0000004e, "V_CMPS_NLT_F32">;
>  defm V_CMPS_TRU_F32 : VOPC_32 <0x0000004f, "V_CMPS_TRU_F32">;
> +
> +let hasSideEffects = 1, Defs = [EXEC] in {
> +
>  defm V_CMPSX_F_F32 : VOPC_32 <0x00000050, "V_CMPSX_F_F32">;
>  defm V_CMPSX_LT_F32 : VOPC_32 <0x00000051, "V_CMPSX_LT_F32">;
>  defm V_CMPSX_EQ_F32 : VOPC_32 <0x00000052, "V_CMPSX_EQ_F32">;
> @@ -237,6 +248,9 @@ defm V_CMPSX_NLE_F32 : VOPC_32 <0x0000005c, "V_CMPSX_NLE_F32">;
>  defm V_CMPSX_NEQ_F32 : VOPC_32 <0x0000005d, "V_CMPSX_NEQ_F32">;
>  defm V_CMPSX_NLT_F32 : VOPC_32 <0x0000005e, "V_CMPSX_NLT_F32">;
>  defm V_CMPSX_TRU_F32 : VOPC_32 <0x0000005f, "V_CMPSX_TRU_F32">;
> +
> +} // End hasSideEffects = 1, Defs = [EXEC]
> +
>  defm V_CMPS_F_F64 : VOPC_64 <0x00000060, "V_CMPS_F_F64">;
>  defm V_CMPS_LT_F64 : VOPC_64 <0x00000061, "V_CMPS_LT_F64">;
>  defm V_CMPS_EQ_F64 : VOPC_64 <0x00000062, "V_CMPS_EQ_F64">;
> @@ -253,6 +267,9 @@ defm V_CMPS_NLE_F64 : VOPC_64 <0x0000006c, "V_CMPS_NLE_F64">;
>  defm V_CMPS_NEQ_F64 : VOPC_64 <0x0000006d, "V_CMPS_NEQ_F64">;
>  defm V_CMPS_NLT_F64 : VOPC_64 <0x0000006e, "V_CMPS_NLT_F64">;
>  defm V_CMPS_TRU_F64 : VOPC_64 <0x0000006f, "V_CMPS_TRU_F64">;
> +
> +let hasSideEffects = 1, Defs = [EXEC] in {
> +
>  defm V_CMPSX_F_F64 : VOPC_64 <0x00000070, "V_CMPSX_F_F64">;
>  defm V_CMPSX_LT_F64 : VOPC_64 <0x00000071, "V_CMPSX_LT_F64">;
>  defm V_CMPSX_EQ_F64 : VOPC_64 <0x00000072, "V_CMPSX_EQ_F64">;
> @@ -269,6 +286,9 @@ defm V_CMPSX_NLE_F64 : VOPC_64 <0x0000007c, "V_CMPSX_NLE_F64">;
>  defm V_CMPSX_NEQ_F64 : VOPC_64 <0x0000007d, "V_CMPSX_NEQ_F64">;
>  defm V_CMPSX_NLT_F64 : VOPC_64 <0x0000007e, "V_CMPSX_NLT_F64">;
>  defm V_CMPSX_TRU_F64 : VOPC_64 <0x0000007f, "V_CMPSX_TRU_F64">;
> +
> +} // End hasSideEffects = 1, Defs = [EXEC]
> +
>  defm V_CMP_F_I32 : VOPC_32 <0x00000080, "V_CMP_F_I32">;
>  defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_LT>;
>  defm V_CMP_EQ_I32 : VOPC_32 <0x00000082, "V_CMP_EQ_I32", i32, COND_EQ>;
> @@ -278,7 +298,7 @@ defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", i32, COND_NE>;
>  defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_GE>;
>  defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32">;
>  
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  
>  defm V_CMPX_F_I32 : VOPC_32 <0x00000090, "V_CMPX_F_I32">;
>  defm V_CMPX_LT_I32 : VOPC_32 <0x00000091, "V_CMPX_LT_I32">;
> @@ -289,7 +309,7 @@ defm V_CMPX_NE_I32 : VOPC_32 <0x00000095, "V_CMPX_NE_I32">;
>  defm V_CMPX_GE_I32 : VOPC_32 <0x00000096, "V_CMPX_GE_I32">;
>  defm V_CMPX_T_I32 : VOPC_32 <0x00000097, "V_CMPX_T_I32">;
>  
> -} // End hasSideEffects
> +} // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">;
>  defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64">;
> @@ -300,7 +320,7 @@ defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64">;
>  defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64">;
>  defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">;
>  
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  
>  defm V_CMPX_F_I64 : VOPC_64 <0x000000b0, "V_CMPX_F_I64">;
>  defm V_CMPX_LT_I64 : VOPC_64 <0x000000b1, "V_CMPX_LT_I64">;
> @@ -311,7 +331,7 @@ defm V_CMPX_NE_I64 : VOPC_64 <0x000000b5, "V_CMPX_NE_I64">;
>  defm V_CMPX_GE_I64 : VOPC_64 <0x000000b6, "V_CMPX_GE_I64">;
>  defm V_CMPX_T_I64 : VOPC_64 <0x000000b7, "V_CMPX_T_I64">;
>  
> -} // End hasSideEffects
> +} // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32">;
>  defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32">;
> @@ -322,7 +342,7 @@ defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32">;
>  defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32">;
>  defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32">;
>  
> -let hasSideEffects = 1 in {
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  
>  defm V_CMPX_F_U32 : VOPC_32 <0x000000d0, "V_CMPX_F_U32">;
>  defm V_CMPX_LT_U32 : VOPC_32 <0x000000d1, "V_CMPX_LT_U32">;
> @@ -333,7 +353,7 @@ defm V_CMPX_NE_U32 : VOPC_32 <0x000000d5, "V_CMPX_NE_U32">;
>  defm V_CMPX_GE_U32 : VOPC_32 <0x000000d6, "V_CMPX_GE_U32">;
>  defm V_CMPX_T_U32 : VOPC_32 <0x000000d7, "V_CMPX_T_U32">;
>  
> -} // End hasSideEffects
> +} // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">;
>  defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64">;
> @@ -343,6 +363,9 @@ defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64">;
>  defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64">;
>  defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64">;
>  defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">;
> +
> +let hasSideEffects = 1, Defs = [EXEC] in {
> +
>  defm V_CMPX_F_U64 : VOPC_64 <0x000000f0, "V_CMPX_F_U64">;
>  defm V_CMPX_LT_U64 : VOPC_64 <0x000000f1, "V_CMPX_LT_U64">;
>  defm V_CMPX_EQ_U64 : VOPC_64 <0x000000f2, "V_CMPX_EQ_U64">;
> @@ -351,10 +374,23 @@ defm V_CMPX_GT_U64 : VOPC_64 <0x000000f4, "V_CMPX_GT_U64">;
>  defm V_CMPX_NE_U64 : VOPC_64 <0x000000f5, "V_CMPX_NE_U64">;
>  defm V_CMPX_GE_U64 : VOPC_64 <0x000000f6, "V_CMPX_GE_U64">;
>  defm V_CMPX_T_U64 : VOPC_64 <0x000000f7, "V_CMPX_T_U64">;
> +
> +} // End hasSideEffects = 1, Defs = [EXEC]
> +
>  defm V_CMP_CLASS_F32 : VOPC_32 <0x00000088, "V_CMP_CLASS_F32">;
> +
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  defm V_CMPX_CLASS_F32 : VOPC_32 <0x00000098, "V_CMPX_CLASS_F32">;
> +} // End hasSideEffects = 1, Defs = [EXEC]
> +
>  defm V_CMP_CLASS_F64 : VOPC_64 <0x000000a8, "V_CMP_CLASS_F64">;
> +
> +let hasSideEffects = 1, Defs = [EXEC] in {
>  defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">;
> +} // End hasSideEffects = 1, Defs = [EXEC]
> +
> +} // End isCompare = 1
> +
>  //def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>;
>  //def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>;
>  //def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>;
> @@ -535,9 +571,11 @@ def IMAGE_SAMPLE_C_B : MIMG_Load_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">;
>  //def IMAGE_SAMPLER : MIMG_NoPattern_ <"IMAGE_SAMPLER", 0x0000007f>;
>  //def V_NOP : VOP1_ <0x00000000, "V_NOP", []>;
>  
> -let neverHasSideEffects = 1 in {
> +
> +let neverHasSideEffects = 1, isMoveImm = 1 in {
>  defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
> -}  // End neverHasSideEffects
> +} // End neverHasSideEffects = 1, isMoveImm = 1
> +
>  defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>;
>  //defm V_CVT_I32_F64 : VOP1_32 <0x00000003, "V_CVT_I32_F64", []>;
>  //defm V_CVT_F64_I32 : VOP1_64 <0x00000004, "V_CVT_F64_I32", []>;
> @@ -748,15 +786,21 @@ def : Pat <
>  defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
>  defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;
>  
> +let isCommutable = 1 in {
>  defm V_ADD_F32 : VOP2_32 <0x00000003, "V_ADD_F32",
>    [(set VReg_32:$dst, (fadd VSrc_32:$src0, VReg_32:$src1))]
>  >;
> +} // End isCommutable = 1
> +
>  defm V_SUB_F32 : VOP2_32 <0x00000004, "V_SUB_F32",
>    [(set VReg_32:$dst, (fsub VSrc_32:$src0, VReg_32:$src1))]
>  >;
>  
>  defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", []>;
>  defm V_MAC_LEGACY_F32 : VOP2_32 <0x00000006, "V_MAC_LEGACY_F32", []>;
> +
> +let isCommutable = 1 in {
> +
>  defm V_MUL_LEGACY_F32 : VOP2_32 <
>    0x00000007, "V_MUL_LEGACY_F32",
>    [(set VReg_32:$dst, (int_AMDGPU_mul VSrc_32:$src0, VReg_32:$src1))]
> @@ -765,10 +809,16 @@ defm V_MUL_LEGACY_F32 : VOP2_32 <
>  defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
>    [(set VReg_32:$dst, (fmul VSrc_32:$src0, VReg_32:$src1))]
>  >;
> +
> +} // End isCommutable = 1
> +
>  //defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
>  //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
>  //defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
>  //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
> +
> +let isCommutable = 1 in {
> +
>  defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
>    [(set VReg_32:$dst, (AMDGPUfmin VSrc_32:$src0, VReg_32:$src1))]
>  >;
> @@ -776,18 +826,25 @@ defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
>  defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32",
>    [(set VReg_32:$dst, (AMDGPUfmax VSrc_32:$src0, VReg_32:$src1))]
>  >;
> +
>  defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>;
>  defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>;
>  defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32", []>;
>  defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", []>;
>  defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", []>;
>  defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", []>;
> +
> +} // End isCommutable = 1
> +
>  defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", []>;
>  defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", []>;
>  defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32", []>;
>  defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", []>;
>  defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32", []>;
>  defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", []>;
> +
> +let isCommutable = 1 in {
> +
>  defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32",
>    [(set VReg_32:$dst, (and VSrc_32:$src0, VReg_32:$src1))]
>  >;
> @@ -797,6 +854,9 @@ defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32",
>  defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32",
>    [(set VReg_32:$dst, (xor VSrc_32:$src0, VReg_32:$src1))]
>  >;
> +
> +} // End isCommutable = 1
> +
>  defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32", []>;
>  defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
>  defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
> @@ -805,12 +865,17 @@ defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
>  //defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
>  //defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
>  let Defs = [VCC] in { // Carry-out goes to VCC
> +
> +let isCommutable = 1 in {
>  defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
>    [(set VReg_32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
>  >;
> +} // End isCommutable = 1
> +
>  defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
>    [(set VReg_32:$dst, (sub (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
>  >;
> +
>  defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", []>;
>  let Uses = [VCC] in { // Carry-out comes from VCC
>  defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", []>;
> -- 
> 1.7.10.4
> 

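A quick note on the flag hunks above, since the later patches depend on
them: these are the bits the operand folder keys off. A minimal sketch of
how generic codegen code reads them (illustrative only, assuming an
existing MachineInstr &MI; the helper names are hypothetical):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// isCommutable = 1 (V_ADD_F32 and friends): src0/src1 may be exchanged.
static bool canReorderSources(const MachineInstr &MI) {
  return MI.getDesc().isCommutable();
}

// hasSideEffects = 1 plus Defs = [EXEC] (the V_CMPX_* forms): the write
// to EXEC keeps these alive even when the compare result itself is unused.
static bool mustKeep(const MachineInstr &MI) {
  return MI.getDesc().hasUnmodeledSideEffects();
}

The new isMoveImm on V_MOV_B32 likewise just sets
MCInstrDesc::isMoveImmediate() for passes that rematerialize constants.
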
> From 4fbfa9d3d8e01f063416305c1ab52c59fe5518eb Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Mon, 25 Feb 2013 14:25:20 +0100
> Subject: [PATCH 7/9] R600/SI: swap operands if it helps folding
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/SIISelLowering.cpp |   16 +++++++++++++++-
>  1 file changed, 15 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index bf1f3bf..da30c07 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -545,8 +545,22 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>  
>      // Is this a VSrc or SSrc operand ?
>      unsigned RegClass = Desc->OpInfo[Op].RegClass;
> -    if (!isVSrc(RegClass) && !isSSrc(RegClass))
> +    if (!isVSrc(RegClass) && !isSSrc(RegClass)) {
> +
> +      if (i == 1 && Desc->isCommutable() &&
> +          fitsRegClass(DAG, Ops[0], RegClass) &&
> +          foldImm(Ops[1], Immediate, ScalarSlotUsed)) {
> +
> +        assert(isVSrc(Desc->OpInfo[NumDefs].RegClass) ||
> +               isSSrc(Desc->OpInfo[NumDefs].RegClass));
> +
> +        // Swap commutable operands
> +        SDValue Tmp = Ops[1];
> +        Ops[1] = Ops[0];
> +        Ops[0] = Tmp;
> +      }
>        continue;
> +    }
>  
>      // Try to fold the immediates
>      if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
> -- 
> 1.7.10.4
> 

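To isolate the rule patch 7 adds: when src1 would fold to a constant but
the slot it occupies cannot take immediates, and src0 does fit that
restricted slot, the two operands of a commutable instruction are
exchanged so the constant lands in the VSrc/SSrc position. A rough
standalone sketch (the bool parameters are hypothetical stand-ins for the
fitsRegClass/foldImm checks):

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <utility>

using llvm::SDValue;

// Swap src0/src1 so that a foldable immediate ends up in the slot that
// accepts inline constants.
static void maybeSwapForFolding(SDValue Ops[], bool Commutable,
                                bool Src0FitsRestricted, bool Src1Folds) {
  if (Commutable && Src0FitsRestricted && Src1Folds)
    std::swap(Ops[0], Ops[1]);
}

The three-line manual swap in the patch is equivalent to std::swap.
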
> From d6653a619c7419ab47e85f3fcd7e28baacff0bd1 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Fri, 15 Feb 2013 20:32:37 +0100
> Subject: [PATCH 8/9] R600/SI: add VOP mapping functions
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Make it possible to map between e32 and e64 encoding opcodes.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/AMDGPUInstrInfo.cpp |    1 +
>  lib/Target/R600/SIInstrInfo.h       |    6 ++++++
>  lib/Target/R600/SIInstrInfo.td      |   39 +++++++++++++++++++++++++----------
>  3 files changed, 35 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
> index 640707d..30f736c 100644
> --- a/lib/Target/R600/AMDGPUInstrInfo.cpp
> +++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
> @@ -22,6 +22,7 @@
>  #include "llvm/CodeGen/MachineRegisterInfo.h"
>  
>  #define GET_INSTRINFO_CTOR
> +#define GET_INSTRMAP_INFO
>  #include "AMDGPUGenInstrInfo.inc"
>  
>  using namespace llvm;
> diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
> index 015cfb3..5789af5 100644
> --- a/lib/Target/R600/SIInstrInfo.h
> +++ b/lib/Target/R600/SIInstrInfo.h
> @@ -73,6 +73,12 @@ public:
>    virtual const TargetRegisterClass *getSuperIndirectRegClass() const;
>    };
>  
> +namespace AMDGPU {
> +
> +  int getVOPe64(uint16_t Opcode);
> +
> +} // End namespace AMDGPU
> +
>  } // End namespace llvm
>  
>  namespace SIInstrFlags {
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index 3a617b4..d6c3f06 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -143,13 +143,17 @@ multiclass SMRD_Helper <bits<5> op, string asm, RegisterClass dstClass> {
>  // Vector ALU classes
>  //===----------------------------------------------------------------------===//
>  
> +class VOP <string opName> {
> +  string OpName = opName;
> +}
> +
>  multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
>                          string opName, list<dag> pattern> {
>  
> -  def _e32: VOP1 <
> +  def _e32 : VOP1 <
>      op, (outs drc:$dst), (ins src:$src0),
>      opName#"_e32 $dst, $src0", pattern
> -  >;
> +  >, VOP <opName>;
>  
>    def _e64 : VOP3 <
>      {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> @@ -158,7 +162,7 @@ multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", []
> -  > {
> +  >, VOP <opName> {
>      let SRC1 = SIOperand.ZERO;
>      let SRC2 = SIOperand.ZERO;
>    }
> @@ -175,7 +179,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
>    def _e32 : VOP2 <
>      op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1),
>      opName#"_e32 $dst, $src0, $src1", pattern
> -  >;
> +  >, VOP <opName>;
>  
>    def _e64 : VOP3 <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> @@ -184,7 +188,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> -  > {
> +  >, VOP <opName> {
>      let SRC2 = SIOperand.ZERO;
>    }
>  }
> @@ -200,7 +204,7 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> {
>    def _e32 : VOP2 <
>      op, (outs VReg_32:$dst), (ins VSrc_32:$src0, VReg_32:$src1),
>      opName#"_e32 $dst, $src0, $src1", pattern
> -  >;
> +  >, VOP <opName>;
>  
>    def _e64 : VOP3b <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> @@ -209,7 +213,7 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> {
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> -  > {
> +  >, VOP <opName> {
>      let SRC2 = SIOperand.ZERO;
>      /* the VOP2 variant puts the carry out into VCC, the VOP3 variant
>         can write it into any SGPR. We currently don't use the carry out,
> @@ -224,7 +228,7 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
>    def _e32 : VOPC <
>      op, (ins arc:$src0, vrc:$src1),
>      opName#"_e32 $dst, $src0, $src1", []
> -  >;
> +  >, VOP <opName>;
>  
>    def _e64 : VOP3 <
>      {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
> @@ -236,7 +240,7 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
>      !if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
>        [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
>      )
> -  > {
> +  >, VOP <opName> {
>      let SRC2 = SIOperand.ZERO;
>    }
>  }
> @@ -254,14 +258,14 @@ class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    (ins VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
> ->;
> +>, VOP <opName>;
>  
>  class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    op, (outs VReg_64:$dst),
>    (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
> ->;
> +>, VOP <opName>;
>  
>  //===----------------------------------------------------------------------===//
>  // Vector I/O classes
> @@ -319,4 +323,17 @@ class MIMG_Load_Helper <bits<7> op, string asm> : MIMG <
>    let mayStore = 0;
>  }
>  
> +//===----------------------------------------------------------------------===//
> +// Vector instruction mappings
> +//===----------------------------------------------------------------------===//
> +
> +// Maps an opcode in e32 form to its e64 equivalent
> +def getVOPe64 : InstrMapping {
> +  let FilterClass = "VOP";
> +  let RowFields = ["OpName"];
> +  let ColFields = ["Size"];
> +  let KeyCol = ["4"];
> +  let ValueCols = [["8"]];
> +}
> +
>  include "SIInstructions.td"
> -- 
> 1.7.10.4
> 

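For reference, the InstrMapping at the end of this patch groups every
record derived from VOP by OpName (the rows) and tells them apart by
encoding size (the columns): the 4-byte e32 forms are the key column, the
8-byte e64/VOP3 forms the value column. TableGen then generates the table
behind AMDGPU::getVOPe64(), consumed roughly like this (pickOpcode is a
hypothetical helper):

#include <cstdint>

namespace llvm { namespace AMDGPU {
  int getVOPe64(uint16_t Opcode);  // generated; returns -1 if no e64 twin
}}

static unsigned pickOpcode(unsigned Opc, bool NeedE64) {
  int OpcodeE64 = llvm::AMDGPU::getVOPe64(Opc);
  return (NeedE64 && OpcodeE64 != -1) ? unsigned(OpcodeE64) : Opc;
}
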
> From bae00f8d1ddeb3f2b60f2af2b9f847eaa61906f3 Mon Sep 17 00:00:00 2001
> From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig at amd.com>
> Date: Sat, 23 Feb 2013 20:04:33 +0100
> Subject: [PATCH 9/9] R600/SI: Add promotion of e32 to e64 in operand folding
> MIME-Version: 1.0
> Content-Type: text/plain; charset=UTF-8
> Content-Transfer-Encoding: 8bit
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reviewed-by: Tom Stellard <thomas.stellard at amd.com>
> ---
>  lib/Target/R600/SIISelLowering.cpp |   36 ++++++++++++++++++++++++++++++++++--
>  1 file changed, 34 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index da30c07..0a0fbd9 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -501,6 +501,13 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>    unsigned NumDefs = Desc->getNumDefs();
>    unsigned NumOps = Desc->getNumOperands();
>  
> +  // e64 version if available, -1 otherwise
> +  int OpcodeE64 = AMDGPU::getVOPe64(Opcode);
> +  const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64);
> +
> +  assert(!DescE64 || DescE64->getNumDefs() == NumDefs);
> +  assert(!DescE64 || DescE64->getNumOperands() == (NumOps + 4));
> +
>    int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
>    bool HaveVSrc = false, HaveSSrc = false;
>  
> @@ -532,6 +539,7 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>  
>    // Second go over the operands and try to fold them
>    std::vector<SDValue> Ops;
> +  bool Promote2e64 = false;
>    for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
>         i != e && Op < NumOps; ++i, ++Op) {
>  
> @@ -558,6 +566,20 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>          SDValue Tmp = Ops[1];
>          Ops[1] = Ops[0];
>          Ops[0] = Tmp;
> +
> +      } else if (DescE64 && !Immediate) {
> +        // Test if it makes sense to switch to e64 encoding
> +
> +        RegClass = DescE64->OpInfo[Op].RegClass;
> +        int32_t TmpImm = -1;
> +        if ((isVSrc(RegClass) || isSSrc(RegClass)) &&
> +            foldImm(Ops[i], TmpImm, ScalarSlotUsed)) {
> +
> +          Immediate = -1;
> +          Promote2e64 = true;
> +          Desc = DescE64;
> +          DescE64 = 0;
> +        }
>        }
>        continue;
>      }
> @@ -569,10 +591,20 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>      }
>    }
>  
> +  if (Promote2e64) {
> +    // Add the modifier flags while promoting
> +    for (unsigned i = 0; i < 4; ++i)
> +      Ops.push_back(DAG.getTargetConstant(0, MVT::i32));
> +  }
> +
>    // Add optional chain and glue
>    for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
>      Ops.push_back(Node->getOperand(i));
>  
> -  // Update the instruction parameters
> -  return DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
> +  // Either create a completely new instruction or update the current one
> +  if (Promote2e64)
> +    return DAG.getMachineNode(OpcodeE64, Node->getDebugLoc(),
> +                              Node->getVTList(), Ops.data(), Ops.size());
> +  else
> +    return DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
>  }
> -- 
> 1.7.10.4
> 
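
One note on the invariant patch 9 leans on: the two asserts near the top
of the hunk encode that an e64 descriptor is exactly its e32 twin plus
the four VOP3 modifier operands ($abs, $clamp, $omod, $neg), which is why
appending four zero i32 constants before re-emitting the node is safe. As
a standalone check (sketch only):

#include "llvm/MC/MCInstrDesc.h"

// True iff E64 has the same defs as E32 and exactly four extra operands,
// i.e. the abs/clamp/omod/neg modifiers added by the VOP3 encoding.
static bool e64LayoutMatches(const llvm::MCInstrDesc &E32,
                             const llvm::MCInstrDesc &E64) {
  return E64.getNumDefs() == E32.getNumDefs() &&
         E64.getNumOperands() == E32.getNumOperands() + 4;
}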
