[llvm] r363934 - [AMDGPU] gfx1010 core wave32 changes

Stanislav Mekhanoshin via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jun 20 08:08:35 PDT 2019


Author: rampitec
Date: Thu Jun 20 08:08:34 2019
New Revision: 363934

URL: http://llvm.org/viewvc/llvm-project?rev=363934&view=rev
Log:
[AMDGPU] gfx1010 core wave32 changes

Differential Revision: https://reviews.llvm.org/D63204

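For readers skimming the patch: gfx1010 now defaults to a 32-bit execution mask (FeatureWavefrontSize32), and wave64 can still be forced with -mattr=-wavefrontsize32,+wavefrontsize64, as several of the updated RUN lines below do. A minimal sketch of the user-visible effect, not part of this patch and adapted from the add_i1.ll checks further down: an i1 mask operation now selects a 32-bit scalar opcode in wave32 and a 64-bit one in wave64.

; RUN: llc -march=amdgcn -mcpu=gfx900  -verify-machineinstrs < %s | FileCheck -check-prefix=W64 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=W32 %s

; In wave64 the i1 mask occupies an SGPR pair; in wave32 it fits in a single SGPR.
; W64: s_xor_b64
; W32: s_xor_b32
define amdgpu_kernel void @add_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
  %a = load volatile i1, i1 addrspace(1)* %in0
  %b = load volatile i1, i1 addrspace(1)* %in1
  %add = add i1 %a, %b
  store i1 %add, i1 addrspace(1)* %out
  ret void
}
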
Added:
    llvm/trunk/test/CodeGen/AMDGPU/wave32.ll
    llvm/trunk/test/MC/AMDGPU/wave32.s
    llvm/trunk/test/MC/Disassembler/AMDGPU/wave32.txt
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPU.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td
    llvm/trunk/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td
    llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
    llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
    llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td
    llvm/trunk/test/CodeGen/AMDGPU/add3.ll
    llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/add_shl.ll
    llvm/trunk/test/CodeGen/AMDGPU/and_or.ll
    llvm/trunk/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll
    llvm/trunk/test/CodeGen/AMDGPU/huge-private-buffer.ll
    llvm/trunk/test/CodeGen/AMDGPU/insert-skip-from-vcc.mir
    llvm/trunk/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
    llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
    llvm/trunk/test/CodeGen/AMDGPU/or3.ll
    llvm/trunk/test/CodeGen/AMDGPU/regbank-reassign.mir
    llvm/trunk/test/CodeGen/AMDGPU/shl_add.ll
    llvm/trunk/test/CodeGen/AMDGPU/shl_or.ll
    llvm/trunk/test/CodeGen/AMDGPU/smrd.ll
    llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/xor3.ll
    llvm/trunk/test/CodeGen/AMDGPU/xor_add.ll
    llvm/trunk/test/MC/AMDGPU/gfx10-constant-bus.s
    llvm/trunk/test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPU.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPU.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPU.td Thu Jun 20 08:08:34 2019
@@ -777,7 +777,7 @@ def FeatureISAVersion10_1_0 : FeatureSet
      FeatureLDSBankCount32,
      FeatureDLInsts,
      FeatureNSAEncoding,
-     FeatureWavefrontSize64,
+     FeatureWavefrontSize32,
      FeatureScalarStores,
      FeatureScalarAtomics,
      FeatureScalarFlatScratchInsts,
@@ -795,7 +795,7 @@ def FeatureISAVersion10_1_1 : FeatureSet
      FeatureDot5Insts,
      FeatureDot6Insts,
      FeatureNSAEncoding,
-     FeatureWavefrontSize64,
+     FeatureWavefrontSize32,
      FeatureScalarStores,
      FeatureScalarAtomics,
      FeatureScalarFlatScratchInsts,
@@ -812,7 +812,7 @@ def FeatureISAVersion10_1_2 : FeatureSet
      FeatureDot5Insts,
      FeatureDot6Insts,
      FeatureNSAEncoding,
-     FeatureWavefrontSize64,
+     FeatureWavefrontSize32,
      FeatureScalarStores,
      FeatureScalarAtomics,
      FeatureScalarFlatScratchInsts,

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td Thu Jun 20 08:08:34 2019
@@ -50,19 +50,19 @@ def AMDGPUFmasOp : SDTypeProfile<1, 4,
 def AMDGPUKillSDT : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
 
 def AMDGPUIfOp : SDTypeProfile<1, 2,
-  [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>]
+  [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>]
 >;
 
 def AMDGPUElseOp : SDTypeProfile<1, 2,
-  [SDTCisVT<0, i64>, SDTCisVT<1, i64>, SDTCisVT<2, OtherVT>]
+  [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>]
 >;
 
 def AMDGPULoopOp : SDTypeProfile<0, 2,
-  [SDTCisVT<0, i64>, SDTCisVT<1, OtherVT>]
+  [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]
 >;
 
 def AMDGPUIfBreakOp : SDTypeProfile<1, 2,
-  [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, i64>]
+  [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, i1>]
 >;
 
 //===----------------------------------------------------------------------===//

Modified: llvm/trunk/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp Thu Jun 20 08:08:34 2019
@@ -101,6 +101,12 @@ static DecodeStatus decodeSoppBrTarget(M
   return addOperand(Inst, MCOperand::createImm(Imm));
 }
 
+static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
+                                  uint64_t Addr, const void *Decoder) {
+  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
+  return addOperand(Inst, DAsm->decodeBoolReg(Val));
+}
+
 #define DECODE_OPERAND(StaticDecoderName, DecoderName) \
 static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                        unsigned Imm, \

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h Thu Jun 20 08:08:34 2019
@@ -946,6 +946,15 @@ public:
   /// not exist. If Opcode is not a pseudo instruction, this is identity.
   int pseudoToMCOpcode(int Opcode) const;
 
+  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
+                                         const TargetRegisterInfo *TRI,
+                                         const MachineFunction &MF)
+    const override {
+    if (OpNum >= TID.getNumOperands())
+      return nullptr;
+    return RI.getRegClass(TID.OpInfo[OpNum].RegClass);
+  }
+
   void fixImplicitOperands(MachineInstr &MI) const;
 };
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Thu Jun 20 08:08:34 2019
@@ -766,6 +766,15 @@ def VOPDstS64orS32 : BoolRC {
   let PrintMethod = "printVOPDst";
 }
 
+// SCSrc_i1 is the operand for pseudo instructions only.
+// Boolean immediates shall not be exposed to codegen instructions.
+def SCSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
+  let OperandNamespace = "AMDGPU";
+  let OperandType = "OPERAND_REG_IMM_INT32";
+  let ParserMatchClass = BoolReg;
+  let DecoderMethod = "decodeBoolReg";
+}
+
 // ===----------------------------------------------------------------------===//
 // ExpSrc* Special cases for exp src operands which are printed as
 // "off" depending on en operand.
@@ -804,11 +813,12 @@ def SDWASrc_i16 : SDWASrc<i16>;
 def SDWASrc_f32 : SDWASrc<f32>;
 def SDWASrc_f16 : SDWASrc<f16>;
 
-def SDWAVopcDst : VOPDstOperand<SReg_64> {
+def SDWAVopcDst : BoolRC {
   let OperandNamespace = "AMDGPU";
   let OperandType = "OPERAND_SDWA_VOPC_DST";
   let EncoderMethod = "getSDWAVopcDstEncoding";
   let DecoderMethod = "decodeSDWAVopcDst";
+  let PrintMethod = "printVOPDst";
 }
 
 class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
@@ -940,11 +950,6 @@ def f32kimm : kimmOperand<i32>;
 def KImmFP16MatchClass : KImmMatchClass<16>;
 def f16kimm : kimmOperand<i16>;
 
-
-def VOPDstS64 : VOPDstOperand <SReg_64> {
-  let PrintMethod = "printVOPDst";
-}
-
 class FPInputModsMatchClass <int opSize> : AsmOperandClass {
   let Name = "RegOrImmWithFP"#opSize#"InputMods";
   let ParserMethod = "parseRegOrImmWithFPInputMods";
@@ -1237,7 +1242,7 @@ class getVALUDstForVT<ValueType VT> {
                           !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>,
                             !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
                               !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>,
-                              VOPDstOperand<SReg_64>)))); // else VT == i1
+                              VOPDstS64orS32)))); // else VT == i1
 }
 
 // Returns the register class to use for the destination of VOP[12C]
@@ -1313,7 +1318,7 @@ class getVOP3SrcForVT<ValueType VT> {
            VSrc_f64,
            VSrc_b64),
         !if(!eq(VT.Value, i1.Value),
-           SCSrc_i1,
+           SSrc_i1,
            !if(isFP,
               !if(!eq(VT.Value, f16.Value),
                  VSrc_f16,

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Thu Jun 20 08:08:34 2019
@@ -121,14 +121,14 @@ def WWM : PseudoInstSI <(outs unknown:$v
 
 } // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]
 
-def ENTER_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins i64imm:$src0)> {
+def ENTER_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins i64imm:$src0)> {
   let Defs = [EXEC];
   let hasSideEffects = 0;
   let mayLoad = 0;
   let mayStore = 0;
 }
 
-def EXIT_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins SReg_64:$src0)> {
+def EXIT_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins SReg_1:$src0)> {
   let hasSideEffects = 0;
   let mayLoad = 0;
   let mayStore = 0;
@@ -161,11 +161,11 @@ def S_SUB_U64_PSEUDO : SPseudoInstSI <
 >;
 
 def S_ADD_U64_CO_PSEUDO : SPseudoInstSI <
-  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
+  (outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
 >;
 
 def S_SUB_U64_CO_PSEUDO : SPseudoInstSI <
-  (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
+  (outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
 >;
 } // End usesCustomInserter = 1, Defs = [SCC]
 
@@ -233,30 +233,30 @@ let isTerminator = 1 in {
 let OtherPredicates = [EnableLateCFGStructurize] in {
  def SI_NON_UNIFORM_BRCOND_PSEUDO : CFPseudoInstSI <
   (outs),
-  (ins SReg_64:$vcc, brtarget:$target),
+  (ins SReg_1:$vcc, brtarget:$target),
   [(brcond i1:$vcc, bb:$target)]> {
     let Size = 12;
 }
 }
 
 def SI_IF: CFPseudoInstSI <
-  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
-  [(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> {
+  (outs SReg_1:$dst), (ins SReg_1:$vcc, brtarget:$target),
+  [(set i1:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> {
   let Constraints = "";
   let Size = 12;
   let hasSideEffects = 1;
 }
 
 def SI_ELSE : CFPseudoInstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
+  (outs SReg_1:$dst),
+  (ins SReg_1:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
   let Size = 12;
   let hasSideEffects = 1;
 }
 
 def SI_LOOP : CFPseudoInstSI <
-  (outs), (ins SReg_64:$saved, brtarget:$target),
-  [(AMDGPUloop i64:$saved, bb:$target)], 1, 1> {
+  (outs), (ins SReg_1:$saved, brtarget:$target),
+  [(AMDGPUloop i1:$saved, bb:$target)], 1, 1> {
   let Size = 8;
   let isBranch = 1;
   let hasSideEffects = 1;
@@ -265,8 +265,7 @@ def SI_LOOP : CFPseudoInstSI <
 } // End isTerminator = 1
 
 def SI_END_CF : CFPseudoInstSI <
-  (outs), (ins SReg_64:$saved),
-  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
+  (outs), (ins SReg_1:$saved), [], 1, 1> {
   let Size = 4;
   let isAsCheapAsAMove = 1;
   let isReMaterializable = 1;
@@ -276,8 +275,7 @@ def SI_END_CF : CFPseudoInstSI <
 }
 
 def SI_IF_BREAK : CFPseudoInstSI <
-  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
-  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
+  (outs SReg_1:$dst), (ins SReg_1:$vcc, SReg_1:$src), []> {
   let Size = 4;
   let isAsCheapAsAMove = 1;
   let isReMaterializable = 1;
@@ -303,7 +301,7 @@ multiclass PseudoInstKill <dag ins> {
   }
 }
 
-defm SI_KILL_I1 : PseudoInstKill <(ins SSrc_b64:$src, i1imm:$killvalue)>;
+defm SI_KILL_I1 : PseudoInstKill <(ins SCSrc_i1:$src, i1imm:$killvalue)>;
 defm SI_KILL_F32_COND_IMM : PseudoInstKill <(ins VSrc_b32:$src0, i32imm:$src1, i32imm:$cond)>;
 
 let Defs = [EXEC,VCC] in
@@ -322,7 +320,7 @@ def SI_BR_UNDEF : SPseudoInstSI <(outs),
 }
 
 def SI_PS_LIVE : PseudoInstSI <
-  (outs SReg_64:$dst), (ins),
+  (outs SReg_1:$dst), (ins),
   [(set i1:$dst, (int_amdgcn_ps_live))]> {
   let SALU = 1;
 }
@@ -584,7 +582,7 @@ def : GCNPat<
 >;
 
 def : GCNPat<
-  (AMDGPUelse i64:$src, bb:$target),
+  (AMDGPUelse i1:$src, bb:$target),
   (SI_ELSE $src, $target, 0)
 >;
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td Thu Jun 20 08:08:34 2019
@@ -733,8 +733,6 @@ def SSrcOrLds_b32 : RegisterOperand<SReg
 
 defm SCSrc : RegInlineOperand<"SReg", "SCSrc"> ;
 
-def SCSrc_i1 : RegisterOperand<SReg_64_XEXEC>;
-
 //===----------------------------------------------------------------------===//
 //  VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate
 //===----------------------------------------------------------------------===//

Modified: llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td Thu Jun 20 08:08:34 2019
@@ -344,7 +344,7 @@ def VOP2b_I32_I1_I32_I32 : VOPProfile<[i
   let AsmDPP8 = "$vdst, vcc, $src0, $src1 $dpp8$fi";
   let AsmDPP16 = AsmDPP#"$fi";
   let Outs32 = (outs DstRC:$vdst);
-  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
+  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
 }
 
 // Write out to vcc or arbitrary SGPR and read in from vcc or
@@ -358,7 +358,7 @@ def VOP2b_I32_I1_I32_I32_I1 : VOPProfile
   let AsmDPP8 = "$vdst, vcc, $src0, $src1, vcc $dpp8$fi";
   let AsmDPP16 = AsmDPP#"$fi";
   let Outs32 = (outs DstRC:$vdst);
-  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
+  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
 
   // Suppress src2 implied by type since the 32-bit encoding uses an
   // implicit VCC use.

Modified: llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td Thu Jun 20 08:08:34 2019
@@ -183,7 +183,7 @@ class VOP3b_Profile<ValueType vt> : VOPP
   let HasModifiers = 0;
   let HasClamp = 0;
   let HasOMod = 0;
-  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
+  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
   let Asm64 = " $vdst, $sdst, $src0, $src1, $src2";
 }
 
@@ -203,7 +203,7 @@ def VOP3b_I64_I1_I32_I32_I64 : VOPProfil
   // FIXME: Hack to stop printing _e64
   let DstRC = RegisterOperand<VReg_64>;
 
-  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
+  let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
   let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp";
 }
 

Modified: llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td Thu Jun 20 08:08:34 2019
@@ -56,7 +56,7 @@ class VOPC_Profile<list<SchedReadWrite>
   let Asm32 = "$src0, $src1";
   // The destination for 32-bit encoding is implicit.
   let HasDst32 = 0;
-  let Outs64 = (outs VOPDstS64:$sdst);
+  let Outs64 = (outs VOPDstS64orS32:$sdst);
   list<SchedReadWrite> Schedule = sched;
 }
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/add3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add3.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/add3.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/add3.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @add3(i32 %a, i32
 ; GFX10-LABEL: add3:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = add i32 %x, %c
@@ -46,6 +47,7 @@ define amdgpu_ps float @mad_no_add3(i32
 ; GFX10-LABEL: mad_no_add3:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_mad_u32_u24 v0, v0, v1, v4
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_mad_u32_u24 v0, v2, v3, v0
 ; GFX10-NEXT:    ; return to shader part epilog
   %a0 = shl i32 %a, 8
@@ -85,6 +87,7 @@ define amdgpu_ps float @add3_vgpr_b(i32
 ; GFX10-LABEL: add3_vgpr_b:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, s3, s2, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = add i32 %x, %c
@@ -107,6 +110,7 @@ define amdgpu_ps float @add3_vgpr_all2(i
 ; GFX10-LABEL: add3_vgpr_all2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, v1, v2, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %b, %c
   %result = add i32 %a, %x
@@ -129,6 +133,7 @@ define amdgpu_ps float @add3_vgpr_bc(i32
 ; GFX10-LABEL: add3_vgpr_bc:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, s2, v0, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = add i32 %x, %c
@@ -151,6 +156,7 @@ define amdgpu_ps float @add3_vgpr_const(
 ; GFX10-LABEL: add3_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, v0, v1, 16
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = add i32 %x, 16
@@ -175,6 +181,7 @@ define amdgpu_ps <2 x float> @add3_multi
 ; GFX10-LABEL: add3_multiuse_outer:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add3_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_mul_lo_u32 v1, v0, v3
 ; GFX10-NEXT:    ; return to shader part epilog
   %inner = add i32 %a, %b
@@ -202,6 +209,7 @@ define amdgpu_ps <2 x float> @add3_multi
 ; GFX10-LABEL: add3_multiuse_inner:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_add_nc_u32_e32 v1, v0, v2
 ; GFX10-NEXT:    ; return to shader part epilog
   %inner = add i32 %a, %b
@@ -240,6 +248,7 @@ define amdgpu_ps float @add3_uniform_vgp
 ; GFX10-NEXT:    v_add_f32_e64 v1, s3, 2.0
 ; GFX10-NEXT:    v_add_f32_e64 v2, s2, 1.0
 ; GFX10-NEXT:    v_add_f32_e64 v0, 0x40400000, s4
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_add_nc_u32_e32 v1, v2, v1
 ; GFX10-NEXT:    v_add_nc_u32_e32 v0, v1, v0
 ; GFX10-NEXT:    ; return to shader part epilog

Modified: llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/add_i1.ll Thu Jun 20 08:08:34 2019
@@ -1,8 +1,10 @@
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s
 
 
 ; GCN-LABEL: {{^}}add_var_var_i1:
-; GCN: s_xor_b64
+; GFX9:  s_xor_b64
+; GFX10: s_xor_b32
 define amdgpu_kernel void @add_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
   %a = load volatile i1, i1 addrspace(1)* %in0
   %b = load volatile i1, i1 addrspace(1)* %in1
@@ -12,7 +14,8 @@ define amdgpu_kernel void @add_var_var_i
 }
 
 ; GCN-LABEL: {{^}}add_var_imm_i1:
-; GCN: s_not_b64
+; GFX9:  s_not_b64
+; GFX10: s_not_b32
 define amdgpu_kernel void @add_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) {
   %a = load volatile i1, i1 addrspace(1)* %in
   %add = add i1 %a, 1
@@ -22,7 +25,8 @@ define amdgpu_kernel void @add_var_imm_i
 
 ; GCN-LABEL: {{^}}add_i1_cf:
 ; GCN: ; %endif
-; GCN: s_not_b64
+; GFX9: s_not_b64
+; GFX10: s_not_b32
 define amdgpu_kernel void @add_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) {
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()

Modified: llvm/trunk/test/CodeGen/AMDGPU/add_shl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add_shl.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/add_shl.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/add_shl.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @add_shl(i32 %a,
 ; GFX10-LABEL: add_shl:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add_lshl_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = shl i32 %x, %c
@@ -45,6 +46,7 @@ define amdgpu_ps float @add_shl_vgpr_c(i
 ; GFX10-LABEL: add_shl_vgpr_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add_lshl_u32 v0, s2, s3, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = shl i32 %x, %c
@@ -67,6 +69,7 @@ define amdgpu_ps float @add_shl_vgpr_ac(
 ; GFX10-LABEL: add_shl_vgpr_ac:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add_lshl_u32 v0, v0, s2, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = shl i32 %x, %c
@@ -89,6 +92,7 @@ define amdgpu_ps float @add_shl_vgpr_con
 ; GFX10-LABEL: add_shl_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_add_lshl_u32 v0, v0, v1, 9
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, %b
   %result = shl i32 %x, 9
@@ -112,6 +116,7 @@ define amdgpu_ps float @add_shl_vgpr_con
 ; GFX10-LABEL: add_shl_vgpr_const_inline_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, 9, 0x7e800
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, 1012
   %result = shl i32 %x, 9
@@ -138,6 +143,7 @@ define amdgpu_ps float @add_shl_vgpr_inl
 ; GFX10-LABEL: add_shl_vgpr_inline_const_x2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, 9, 0x600
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = add i32 %a, 3
   %result = shl i32 %x, 9

Modified: llvm/trunk/test/CodeGen/AMDGPU/and_or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/and_or.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/and_or.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/and_or.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @and_or(i32 %a, i
 ; GFX10-LABEL: and_or:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 %a, %b
   %result = or i32 %x, %c
@@ -46,6 +47,7 @@ define amdgpu_ps float @and_or_vgpr_b(i3
 ; GFX10-LABEL: and_or_vgpr_b:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, s2, v0, s3
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 %a, %b
   %result = or i32 %x, %c
@@ -68,6 +70,7 @@ define amdgpu_ps float @and_or_vgpr_ab(i
 ; GFX10-LABEL: and_or_vgpr_ab:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, v0, v1, s2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 %a, %b
   %result = or i32 %x, %c
@@ -90,6 +93,7 @@ define amdgpu_ps float @and_or_vgpr_cons
 ; GFX10-LABEL: and_or_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, v0, 4, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 4, %a
   %result = or i32 %x, %b
@@ -113,6 +117,7 @@ define amdgpu_ps float @and_or_vgpr_cons
 ; GFX10-LABEL: and_or_vgpr_const_inline_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, v0, 20, 0x808
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 20, %a
   %result = or i32 %x, 2056
@@ -135,6 +140,7 @@ define amdgpu_ps float @and_or_vgpr_inli
 ; GFX10-LABEL: and_or_vgpr_inline_const_x2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_and_or_b32 v0, v0, 4, 1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = and i32 4, %a
   %result = or i32 %x, 1

Modified: llvm/trunk/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/hsa-metadata-kernel-code-props-v3.ll Thu Jun 20 08:08:34 2019
@@ -1,7 +1,7 @@
 ; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=WAVE64 --check-prefix=NOTES %s
 ; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX803 --check-prefix=WAVE64 --check-prefix=NOTES %s
 ; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=WAVE64 --check-prefix=NOTES %s
-; run: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX1010 --check-prefix=WAVE32 --check-prefix=NOTES %s
+; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX1010 --check-prefix=WAVE32 --check-prefix=NOTES %s
 
 @var = addrspace(1) global float 0.0
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/huge-private-buffer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/huge-private-buffer.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/huge-private-buffer.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/huge-private-buffer.ll Thu Jun 20 08:08:34 2019
@@ -1,9 +1,23 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE64 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE32 %s
+
+; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo14:
+; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4
+; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x3ffc, [[FI]]
+; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]]
+define amdgpu_kernel void @scratch_buffer_known_high_masklo14() #0 {
+  %alloca = alloca i32, align 4, addrspace(5)
+  store volatile i32 0, i32 addrspace(5)* %alloca
+  %toint = ptrtoint i32 addrspace(5)* %alloca to i32
+  %masked = and i32 %toint, 16383
+  store volatile i32 %masked, i32 addrspace(1)* undef
+  ret void
+}
 
 ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo16:
 ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4
 ; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xfffc, [[FI]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]]
+; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]]
 define amdgpu_kernel void @scratch_buffer_known_high_masklo16() #0 {
   %alloca = alloca i32, align 4, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -15,8 +29,11 @@ define amdgpu_kernel void @scratch_buffe
 
 ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo17:
 ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4
-; GCN-NOT: [[FI]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]]
+; WAVE64-NOT: [[FI]]
+; WAVE64: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]]
+
+; WAVE32: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x1fffc, [[FI]]
+; WAVE32: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]]
 define amdgpu_kernel void @scratch_buffer_known_high_masklo17() #0 {
   %alloca = alloca i32, align 4, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -29,7 +46,7 @@ define amdgpu_kernel void @scratch_buffe
 ; GCN-LABEL: {{^}}scratch_buffer_known_high_mask18:
 ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4
 ; GCN-NOT: [[FI]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]]
+; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]]
 define amdgpu_kernel void @scratch_buffer_known_high_mask18() #0 {
   %alloca = alloca i32, align 4, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert-skip-from-vcc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert-skip-from-vcc.mir?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert-skip-from-vcc.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert-skip-from-vcc.mir Thu Jun 20 08:08:34 2019
@@ -1,4 +1,5 @@
 # RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
+# RUN: llc -march=amdgcn -mcpu=gfx1010 -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=W32 %s
 
 ---
 # GCN-LABEL: name: and_execz_mov_vccz
@@ -318,3 +319,22 @@ body:             |
     S_CBRANCH_VCCZ %bb.1, implicit killed $vcc
     S_ENDPGM 0, implicit $scc
 ...
+---
+# W32-LABEL: name: and_execz_mov_vccz_w32
+# W32-NOT: S_MOV_
+# W32-NOT: S_AND_
+# W32: S_CBRANCH_EXECZ %bb.1, implicit $exec
+name:            and_execz_mov_vccz_w32
+body:             |
+  bb.0:
+    S_NOP 0
+
+  bb.1:
+    S_NOP 0
+
+  bb.2:
+    $sgpr0 = S_MOV_B32 -1
+    $vcc_lo = S_AND_B32 $exec_lo, killed $sgpr0, implicit-def dead $scc
+    S_CBRANCH_VCCZ %bb.1, implicit killed $vcc
+    S_ENDPGM 0
+...

Modified: llvm/trunk/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll Thu Jun 20 08:08:34 2019
@@ -1,5 +1,6 @@
-; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=SI --check-prefix=ALL %s
-; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=CI --check-prefix=ALL %s
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=SI,SICI,ALL %s
+; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=CI,SICI,ALL %s
+; RUN: opt -S -mcpu=gfx1010 -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=GFX10,ALL %s
 
 ; SI-NOT: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
 ; CI: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4
@@ -46,7 +47,8 @@ entry:
   ret void
 }
 
-; ALL: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4
+; SICI: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4
+; GFX10: alloca [5 x i32]
 
 define amdgpu_kernel void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 {
 entry:
@@ -141,7 +143,9 @@ entry:
 }
 
 ; ALL-LABEL: @occupancy_6_over(
-; ALL: alloca [43 x i8]
+; SICI: alloca [43 x i8]
+; GFX10-NOT: alloca
+
 define amdgpu_kernel void @occupancy_6_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 {
 entry:
   %stack = alloca [43 x i8], align 4
@@ -191,7 +195,9 @@ entry:
 }
 
 ; ALL-LABEL: @occupancy_8_over(
-; ALL: alloca [33 x i8]
+; SICI: alloca [33 x i8]
+; GFX10-NOT: alloca
+
 define amdgpu_kernel void @occupancy_8_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 {
 entry:
   %stack = alloca [33 x i8], align 4
@@ -241,7 +247,9 @@ entry:
 }
 
 ; ALL-LABEL: @occupancy_9_over(
-; ALL: alloca [29 x i8]
+; SICI: alloca [29 x i8]
+; GFX10-NOT: alloca
+
 define amdgpu_kernel void @occupancy_9_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 {
 entry:
   %stack = alloca [29 x i8], align 4

Modified: llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir Thu Jun 20 08:08:34 2019
@@ -1,6 +1,7 @@
 # RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,ADDR64
 # RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64
 # RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64
+# RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W32
 
 # Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
 #

Modified: llvm/trunk/test/CodeGen/AMDGPU/or3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/or3.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/or3.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/or3.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @or3(i32 %a, i32
 ; GFX10-LABEL: or3:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = or i32 %a, %b
   %result = or i32 %x, %c
@@ -47,6 +48,7 @@ define amdgpu_ps float @or3_vgpr_a(i32 %
 ; GFX10-LABEL: or3_vgpr_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_or3_b32 v0, v0, s2, s3
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = or i32 %a, %b
   %result = or i32 %x, %c
@@ -69,6 +71,7 @@ define amdgpu_ps float @or3_vgpr_all2(i3
 ; GFX10-LABEL: or3_vgpr_all2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_or3_b32 v0, v1, v2, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = or i32 %b, %c
   %result = or i32 %a, %x
@@ -91,6 +94,7 @@ define amdgpu_ps float @or3_vgpr_bc(i32
 ; GFX10-LABEL: or3_vgpr_bc:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_or3_b32 v0, s2, v0, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = or i32 %a, %b
   %result = or i32 %x, %c
@@ -113,6 +117,7 @@ define amdgpu_ps float @or3_vgpr_const(i
 ; GFX10-LABEL: or3_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_or3_b32 v0, v1, v0, 64
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = or i32 64, %b
   %result = or i32 %x, %a

Modified: llvm/trunk/test/CodeGen/AMDGPU/regbank-reassign.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/regbank-reassign.mir?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/regbank-reassign.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/regbank-reassign.mir Thu Jun 20 08:08:34 2019
@@ -49,6 +49,24 @@ body: |
     S_ENDPGM 0
 ...
 
+# GCN-LABEL: s11_vs_vcc{{$}}
+# GCN: $vgpr0, $vcc_lo = V_ADDC_U32_e64 killed $sgpr14, killed $vgpr0, killed $vcc_lo, 0
+---
+name:            s11_vs_vcc
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_32, preferred-register: '$sgpr11' }
+  - { id: 1, class: vgpr_32 }
+  - { id: 2, class: vgpr_32 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1 = IMPLICIT_DEF
+    $vcc_lo = IMPLICIT_DEF
+    %2, $vcc_lo = V_ADDC_U32_e64 killed %0, killed %1, killed $vcc_lo, 0, implicit $exec
+    S_ENDPGM 0
+...
+
 # GCN-LABEL: s0_vs_s16{{$}}
 # GCN: S_AND_B32 killed renamable $sgpr14, $sgpr0,
 ---

Modified: llvm/trunk/test/CodeGen/AMDGPU/shl_add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl_add.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/shl_add.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/shl_add.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @shl_add(i32 %a,
 ; GFX10-LABEL: shl_add:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = add i32 %x, %c
@@ -46,6 +47,7 @@ define amdgpu_ps float @shl_add_vgpr_a(i
 ; GFX10-LABEL: shl_add_vgpr_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, s2, s3
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = add i32 %x, %c
@@ -68,6 +70,7 @@ define amdgpu_ps float @shl_add_vgpr_all
 ; GFX10-LABEL: shl_add_vgpr_all:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = add i32 %x, %c
@@ -90,6 +93,7 @@ define amdgpu_ps float @shl_add_vgpr_ab(
 ; GFX10-LABEL: shl_add_vgpr_ab:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, v1, s2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = add i32 %x, %c
@@ -112,6 +116,7 @@ define amdgpu_ps float @shl_add_vgpr_con
 ; GFX10-LABEL: shl_add_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_add_u32 v0, v0, 3, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, 3
   %result = add i32 %x, %b

Modified: llvm/trunk/test/CodeGen/AMDGPU/shl_or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl_or.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/shl_or.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/shl_or.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @shl_or(i32 %a, i
 ; GFX10-LABEL: shl_or:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = or i32 %x, %c
@@ -45,6 +46,7 @@ define amdgpu_ps float @shl_or_vgpr_c(i3
 ; GFX10-LABEL: shl_or_vgpr_c:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, s2, s3, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = or i32 %x, %c
@@ -67,6 +69,7 @@ define amdgpu_ps float @shl_or_vgpr_all2
 ; GFX10-LABEL: shl_or_vgpr_all2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = or i32 %c, %x
@@ -89,6 +92,7 @@ define amdgpu_ps float @shl_or_vgpr_ac(i
 ; GFX10-LABEL: shl_or_vgpr_ac:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, s2, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = or i32 %x, %c
@@ -111,6 +115,7 @@ define amdgpu_ps float @shl_or_vgpr_cons
 ; GFX10-LABEL: shl_or_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, v1, 6
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, %b
   %result = or i32 %x, 6
@@ -133,6 +138,7 @@ define amdgpu_ps float @shl_or_vgpr_cons
 ; GFX10-LABEL: shl_or_vgpr_const2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 6, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, 6
   %result = or i32 %x, %b
@@ -155,6 +161,7 @@ define amdgpu_ps float @shl_or_vgpr_cons
 ; GFX10-LABEL: shl_or_vgpr_const_scalar1:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, s2, 6, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, 6
   %result = or i32 %x, %b
@@ -177,6 +184,7 @@ define amdgpu_ps float @shl_or_vgpr_cons
 ; GFX10-LABEL: shl_or_vgpr_const_scalar2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 6, s2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = shl i32 %a, 6
   %result = or i32 %x, %b

Modified: llvm/trunk/test/CodeGen/AMDGPU/smrd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/smrd.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/smrd.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/smrd.ll Thu Jun 20 08:08:34 2019
@@ -92,6 +92,7 @@ entry:
 ; GCN-DAG: s_mov_b32 s1, 1
 ; GCN-DAG: s_mov_b32 s0, 0
 ; SI-NEXT: nop 3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
 ; GCN-NEXT: s_buffer_load_dword s0, s[0:3], 0x0
 define amdgpu_ps float @smrd_hazard(<4 x i32> inreg %desc) #0 {
 main_body:

Modified: llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sub_i1.ll Thu Jun 20 08:08:34 2019
@@ -1,8 +1,10 @@
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE64 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE32 %s
 
 
 ; GCN-LABEL: {{^}}sub_var_var_i1:
-; GCN: s_xor_b64
+; WAVE32: s_xor_b32
+; WAVE64: s_xor_b64
 define amdgpu_kernel void @sub_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
   %a = load volatile i1, i1 addrspace(1)* %in0
   %b = load volatile i1, i1 addrspace(1)* %in1
@@ -12,7 +14,8 @@ define amdgpu_kernel void @sub_var_var_i
 }
 
 ; GCN-LABEL: {{^}}sub_var_imm_i1:
-; GCN: s_not_b64
+; WAVE32: s_not_b32
+; WAVE64: s_not_b64
 define amdgpu_kernel void @sub_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) {
   %a = load volatile i1, i1 addrspace(1)* %in
   %sub = sub i1 %a, 1
@@ -22,7 +25,8 @@ define amdgpu_kernel void @sub_var_imm_i
 
 ; GCN-LABEL: {{^}}sub_i1_cf:
 ; GCN: ; %endif
-; GCN: s_not_b64
+; WAVE32: s_not_b32
+; WAVE64: s_not_b64
 define amdgpu_kernel void @sub_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) {
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()

Added: llvm/trunk/test/CodeGen/AMDGPU/wave32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/wave32.ll?rev=363934&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/wave32.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/wave32.ll Thu Jun 20 08:08:34 2019
@@ -0,0 +1,1140 @@
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s
+
+; GCN-LABEL: {{^}}test_vopc_i32:
+; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo
+; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}}
+define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
+  %load = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp sgt i32 %load, 0
+  %sel = select i1 %cmp, i32 1, i32 2
+  store i32 %sel, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vopc_f32:
+; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo
+; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}}
+define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
+  %load = load float, float addrspace(1)* %gep, align 4
+  %cmp = fcmp ugt float %load, 0.0
+  %sel = select i1 %cmp, float 1.0, float 2.0
+  store float %sel, float addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vopc_vcmpx:
+; GFX1032: v_cmpx_le_f32_e32 0, v{{[0-9]+}}
+; GFX1064: v_cmpx_le_f32_e32 0, v{{[0-9]+}}
+define amdgpu_ps void @test_vopc_vcmpx(float %x) {
+  %cmp = fcmp oge float %x, 0.0
+  call void @llvm.amdgcn.kill(i1 %cmp)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vopc_2xf16:
+; GFX1032: v_cmp_le_f16_sdwa [[SC:s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
+; GFX1064: v_cmp_le_f16_sdwa [[SC:s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
+define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid
+  %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4
+  %elt = extractelement <2 x half> %load, i32 1
+  %cmp = fcmp ugt half %elt, 0.0
+  %sel = select i1 %cmp, <2 x half> <half 1.0, half 1.0>, <2 x half> %load
+  store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vopc_class:
+; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
+; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}}
+define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 {
+  %fabs = tail call float @llvm.fabs.f32(float %x)
+  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
+  %ext = zext i1 %cmp to i32
+  store i32 %ext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vcmp_vcnd_f16:
+; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]
+
+; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}}
+define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 {
+  %cmp = fcmp oeq half %x, 0x7FF0000000000000
+  %sel = select i1 %cmp, half 1.0, half %x
+  store half %sel, half addrspace(1)* %out, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and:
+; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
+; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}}
+; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
+; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
+; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}}
+; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
+define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
+  %load = load float, float addrspace(1)* %gep, align 4
+  %cmp = fcmp ugt float %load, 0.0
+  %cmp2 = fcmp ult float %load, 1.0
+  %and = and i1 %cmp, %cmp2
+  %sel = select i1 %and, float 1.0, float 2.0
+  store float %sel, float addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor:
+; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
+; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}}
+; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
+; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
+; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}}
+; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
+define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
+  %load = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp sgt i32 %load, 0
+  %cmp2 = icmp slt i32 %load, 1
+  %xor = xor i1 %cmp, %cmp2
+  %sel = select i1 %xor, i32 1, i32 2
+  store i32 %sel, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or:
+; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}}
+; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}}
+; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
+; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}}
+; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}}
+; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
+define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
+  %load = load i32, i32 addrspace(1)* %gep, align 4
+  %cmp = icmp ugt i32 %load, 3
+  %cmp2 = icmp ult i32 %load, 2
+  %or = or i1 %cmp, %cmp2
+  %sel = select i1 %or, i32 1, i32 2
+  store i32 %sel, i32 addrspace(1)* %gep, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_mask_if:
+; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
+; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
+; GCN: ; mask branch
+define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 {
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %cmp = icmp ugt i32 %lid, 10
+  br i1 %cmp, label %if, label %endif
+
+if:
+  store i32 0, i32 addrspace(1)* %arg, align 4
+  br label %endif
+
+endif:
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_loop_with_if:
+; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
+; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}}
+; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
+; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}]
+; GCN:     s_cbranch_execz
+; GCN:   BB{{.*}}:
+; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
+; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
+; GCN:     s_cbranch_execz
+; GCN:   BB{{.*}}:
+; GCN:   BB{{.*}}:
+; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}}
+; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}]
+; GCN:     ; mask branch BB
+; GCN:   BB{{.*}}:
+; GCN:   BB{{.*}}:
+; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}}
+; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}}
+; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}]
+; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
+; GCN:     ; mask branch BB
+; GCN:   BB{{.*}}:
+; GCN:   BB{{.*}}:
+; GCN:     s_endpgm
+define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb2
+
+bb1:
+  ret void
+
+bb2:
+  %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ]
+  %tmp4 = icmp slt i32 %tmp3, %tmp
+  br i1 %tmp4, label %bb5, label %bb11
+
+bb5:
+  %tmp6 = sext i32 %tmp3 to i64
+  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
+  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
+  %tmp9 = icmp sgt i32 %tmp8, 10
+  br i1 %tmp9, label %bb10, label %bb11
+
+bb10:
+  store i32 %tmp, i32 addrspace(1)* %tmp7, align 4
+  br label %bb13
+
+bb11:
+  %tmp12 = sdiv i32 %tmp3, 2
+  br label %bb13
+
+bb13:
+  %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ]
+  %tmp15 = add nsw i32 %tmp14, 1
+  %tmp16 = icmp slt i32 %tmp14, 255
+  br i1 %tmp16, label %bb2, label %bb1
+}
+
+; GCN-LABEL: {{^}}test_loop_with_if_else_break:
+; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
+; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
+; GCN:     ; mask branch
+; GCN:     s_cbranch_execz
+; GCN:   BB{{.*}}:
+; GCN:   BB{{.*}}:
+; GFX1032: s_andn2_b32 s{{[0-9]+}}, s{{[0-9]+}}, exec_lo
+; GFX1064: s_andn2_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], exec
+; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
+; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
+; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
+; GCN:     s_cbranch_execz
+; GCN:   BB{{.*}}:
+define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp1 = icmp eq i32 %tmp, 0
+  br i1 %tmp1, label %.loopexit, label %.preheader
+
+.preheader:
+  br label %bb2
+
+bb2:
+  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ]
+  %tmp4 = zext i32 %tmp3 to i64
+  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
+  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
+  %tmp7 = icmp sgt i32 %tmp6, 10
+  br i1 %tmp7, label %bb8, label %.loopexit
+
+bb8:
+  store i32 %tmp, i32 addrspace(1)* %tmp5, align 4
+  %tmp9 = add nuw nsw i32 %tmp3, 1
+  %tmp10 = icmp ult i32 %tmp9, 256
+  %tmp11 = icmp ult i32 %tmp9, %tmp
+  %tmp12 = and i1 %tmp10, %tmp11
+  br i1 %tmp12, label %bb2, label %.loopexit
+
+.loopexit:
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_addc_vop2b:
+; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}}
+; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
+; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}}
+; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
+define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
+  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
+  %tmp5 = add nsw i64 %tmp4, %arg1
+  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_subbrev_vop2b:
+; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
+; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
+; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
+; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
+define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
+  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
+  %tmp5 = sub nsw i64 %tmp4, %arg1
+  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_subb_vop2b:
+; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
+; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
+; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
+; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
+define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
+  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
+  %tmp5 = sub nsw i64 %arg1, %tmp4
+  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_udiv64:
+; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
+; GFX1032: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]]
+; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
+; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
+; GFX1032: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo
+; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo
+; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s\[[0-9:]+\]]], v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
+; GFX1064: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]]
+; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
+; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}}
+; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}}
+define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
+bb:
+  %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1
+  %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8
+  %tmp2 = load i64, i64 addrspace(1)* %arg, align 8
+  %tmp3 = udiv i64 %tmp1, %tmp2
+  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2
+  store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_scale_f32:
+; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4
+
+  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
+  %result0 = extractvalue { float, i1 } %result, 0
+  store float %result0, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_scale_f64:
+; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
+; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
+define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+
+  %a = load volatile double, double addrspace(1)* %gep.0, align 8
+  %b = load volatile double, double addrspace(1)* %gep.1, align 8
+
+  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
+  %result0 = extractvalue { double, i1 } %result, 0
+  store double %result0, double addrspace(1)* %out, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_mad_i64_i32:
+; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
+; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
+define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = sext i32 %arg0 to i64
+  %sext1 = sext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %mul, %arg2
+  ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}test_mad_u64_u32:
+; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
+; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
+define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = zext i32 %arg0 to i64
+  %sext1 = zext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %mul, %arg2
+  ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32:
+; GFX1032: v_cmp_eq_u32_e64 vcc_lo,
+; GFX1064: v_cmp_eq_u32_e64 vcc,
+; GCN:     v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
+  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
+  store float %result, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f64:
+; GFX1032: v_cmp_eq_u32_e64 vcc_lo,
+; GFX1064: v_cmp_eq_u32_e64 vcc,
+; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
+define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
+  %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
+  store double %result, double addrspace(1)* %out, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
+; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}}
+; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}}
+; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}}
+; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}}
+
+; GCN: load_dword [[LOAD:v[0-9]+]]
+; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]]
+
+; GCN: BB{{[0-9_]+}}:
+; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]]
+; GFX1064: s_or_b64 exec, exec, [[SAVE]]
+; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+  %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
+  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
+  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
+  %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2
+
+  %a = load float, float addrspace(1)* %gep.a
+  %b = load float, float addrspace(1)* %gep.b
+  %c = load float, float addrspace(1)* %gep.c
+
+  %cmp0 = icmp eq i32 %tid, 0
+  br i1 %cmp0, label %bb, label %exit
+
+bb:
+  %val = load volatile i32, i32 addrspace(1)* %dummy
+  %cmp1 = icmp ne i32 %val, 0
+  br label %exit
+
+exit:
+  %cond = phi i1 [false, %entry], [%cmp1, %bb]
+  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
+  store float %result, float addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}fdiv_f32:
+; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NOT: vcc
+; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+entry:
+  %fdiv = fdiv float %a, %b
+  store float %fdiv, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_br_cc_f16:
+; GFX1032:      v_cmp_nlt_f16_e32 vcc_lo,
+; GFX1032-NEXT: s_and_b32 vcc_lo, exec_lo, vcc_lo
+; GFX1064:      v_cmp_nlt_f16_e32 vcc,
+; GFX1064-NEXT: s_and_b64 vcc, exec, vcc{{$}}
+; GCN-NEXT: s_cbranch_vccnz
+define amdgpu_kernel void @test_br_cc_f16(
+    half addrspace(1)* %r,
+    half addrspace(1)* %a,
+    half addrspace(1)* %b) {
+entry:
+  %a.val = load half, half addrspace(1)* %a
+  %b.val = load half, half addrspace(1)* %b
+  %fcmp = fcmp olt half %a.val, %b.val
+  br i1 %fcmp, label %one, label %two
+
+one:
+  store half %a.val, half addrspace(1)* %r
+  ret void
+
+two:
+  store half %b.val, half addrspace(1)* %r
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_brcc_i1:
+; GCN:      s_cmp_eq_u32 s{{[0-9]+}}, 0
+; GCN-NEXT: s_cbranch_scc1
+define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
+  %cmp0 = icmp ne i1 %val, 0
+  br i1 %cmp0, label %store, label %end
+
+store:
+  store i32 222, i32 addrspace(1)* %out
+  ret void
+
+end:
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_preserve_condition_undef_flag:
+; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
+; GFX1032: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0
+; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
+; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}}
+; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]]
+; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
+; GFX1064: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0
+; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
+; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
+; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}]
+; GFX1064: s_and_b64 vcc, exec, [[OR2]]
+; GCN:     s_cbranch_vccnz
+define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 {
+bb0:
+  %tmp = icmp sgt i32 %arg1, 4
+  %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
+  %tmp4 = select i1 %undef, float %arg, float 1.000000e+00
+  %tmp5 = fcmp ogt float %arg2, 0.000000e+00
+  %tmp6 = fcmp olt float %arg2, 1.000000e+00
+  %tmp7 = fcmp olt float %arg, %tmp4
+  %tmp8 = and i1 %tmp5, %tmp6
+  %tmp9 = and i1 %tmp8, %tmp7
+  br i1 %tmp9, label %bb1, label %bb2
+
+bb1:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %bb2
+
+bb2:
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop:
+; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1
+; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1
+; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
+define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 {
+bb:
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp = sub i32 %id, %arg
+  br label %bb1
+
+bb1:                                              ; preds = %Flow, %bb
+  %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
+  %lsr.iv.next = add i32 %lsr.iv, 1
+  %cmp0 = icmp slt i32 %lsr.iv.next, 0
+  br i1 %cmp0, label %bb4, label %Flow
+
+bb4:                                              ; preds = %bb1
+  %load = load volatile i32, i32 addrspace(1)* undef, align 4
+  %cmp1 = icmp sge i32 %tmp, %load
+  br label %Flow
+
+Flow:                                             ; preds = %bb4, %bb1
+  %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
+  %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
+  br i1 %tmp3, label %bb1, label %bb9
+
+bb9:                                              ; preds = %Flow
+  store volatile i32 7, i32 addrspace(3)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr:
+; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo
+; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo
+; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}}
+; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo
+; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc
+; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}}
+; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
+define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 {
+entry:
+  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %index = add i32 %id, -512
+  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
+  store i32 %value, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_set_inactive:
+; GFX1032: s_not_b32 exec_lo, exec_lo
+; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42
+; GFX1032: s_not_b32 exec_lo, exec_lo
+; GFX1064: s_not_b64 exec, exec{{$}}
+; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42
+; GFX1064: s_not_b64 exec, exec{{$}}
+define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 {
+  %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42)
+  store i32 %tmp, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_set_inactive_64:
+; GFX1032: s_not_b32 exec_lo, exec_lo
+; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
+; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
+; GFX1032: s_not_b32 exec_lo, exec_lo
+; GFX1064: s_not_b64 exec, exec{{$}}
+; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
+; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
+; GFX1064: s_not_b64 exec, exec{{$}}
+define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 {
+  %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0)
+  store i64 %tmp, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_kill_i1_terminator_float:
+; GFX1032: s_mov_b32 exec_lo, 0
+; GFX1064: s_mov_b64 exec, 0
+define amdgpu_ps void @test_kill_i1_terminator_float() #0 {
+  call void @llvm.amdgcn.kill(i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_kill_i1_terminator_i1:
+; GFX1032: s_or_b32 [[OR:s[0-9]+]],
+; GFX1032: s_and_b32 exec_lo, exec_lo, [[OR]]
+; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]],
+; GFX1064: s_and_b64 exec, exec, [[OR]]
+define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
+  %c1 = icmp slt i32 %a, %b
+  %c2 = icmp slt i32 %c, %d
+  %x = or i1 %c1, %c2
+  call void @llvm.amdgcn.kill(i1 %x)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_loop_vcc:
+; GFX1032: v_cmp_lt_f32_e32 vcc_lo,
+; GFX1064: v_cmp_lt_f32_e32 vcc,
+; GCN: s_cbranch_vccnz
+define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
+entry:
+  br label %loop
+
+loop:
+  %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ]
+  %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ]
+  %cc = fcmp ogt float %ctr.iv, 7.0
+  br i1 %cc, label %break, label %body
+
+body:
+  %c.iv0 = extractelement <4 x float> %c.iv, i32 0
+  %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
+  %ctr.next = fadd float %ctr.iv, 2.0
+  br label %loop
+
+break:
+  ret <4 x float> %c.iv
+}
+
+; GCN-LABEL: {{^}}test_wwm1:
+; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
+; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
+; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
+; GFX1064: s_mov_b64 exec, [[SAVE]]
+define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
+main_body:
+  %out = fadd float %src0, %src1
+  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
+  ret float %out.0
+}
+
+; GCN-LABEL: {{^}}test_wwm2:
+; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}}
+; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
+; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
+; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
+; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
+; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}}
+; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
+; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
+; GFX1064: s_mov_b64 exec, [[SAVE2]]
+; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
+define amdgpu_ps float @test_wwm2(i32 inreg %idx) {
+main_body:
+  ; use mbcnt to make sure the branch is divergent
+  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
+  %cc = icmp uge i32 %hi, 32
+  br i1 %cc, label %endif, label %if
+
+if:
+  %src = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0)
+  %out = fadd float %src, %src
+  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
+  %out.1 = fadd float %src, %out.0
+  br label %endif
+
+endif:
+  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
+  ret float %out.2
+}
+
+; GCN-LABEL: {{^}}test_wqm1:
+; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo
+; GFX1032: s_wqm_b32 exec_lo, exec_lo
+; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]]
+; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}}
+; GFX1064: s_wqm_b64 exec, exec{{$}}
+; GFX1064: s_and_b64 exec, exec, [[ORIG]]
+define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 {
+main_body:
+  %inst23 = extractelement <2 x float> %pos, i32 0
+  %inst24 = extractelement <2 x float> %pos, i32 1
+  %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0)
+  %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0)
+  %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0)
+  %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0)
+  %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0)
+  ret <4 x float> %tex
+}
+
+; GCN-LABEL: {{^}}test_wqm2:
+; GFX1032: s_wqm_b32 exec_lo, exec_lo
+; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9]+}}
+; GFX1064: s_wqm_b64 exec, exec{{$}}
+; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}]
+define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 {
+main_body:
+  %src0 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i1 0, i1 0)
+  %src1 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i1 0, i1 0)
+  %out = fadd float %src0, %src1
+  %out.0 = bitcast float %out to i32
+  %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0)
+  %out.2 = bitcast i32 %out.1 to float
+  ret float %out.2
+}
+
+; GCN-LABEL: {{^}}test_intr_fcmp_i64:
+; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
+; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
+; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
+; GFX1064:     v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
+; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
+; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
+; GCN:         store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]],
+define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) {
+  %temp = call float @llvm.fabs.f32(float %a)
+  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_intr_icmp_i64:
+; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
+; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}}
+; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]]
+; GFX1064:     v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}}
+; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
+; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
+; GCN:         store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]],
+define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_intr_fcmp_i32:
+; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
+; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
+; GFX1064:     v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
+; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
+; GCN:         store_dword v[{{[0-9:]+}}], v[[V_LO]],
+define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) {
+  %temp = call float @llvm.fabs.f32(float %a)
+  %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1)
+  store i32 %result, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_intr_icmp_i32:
+; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}}
+; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
+; GFX1064:     v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}}
+; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
+; GCN:         store_dword v[{{[0-9:]+}}], v[[V_LO]],
+define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) {
+  %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32)
+  store i32 %result, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_wqm_vote:
+; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0
+; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo
+; GFX1032: s_and_b32 exec_lo, exec_lo, [[WQM]]
+; GFX1064: v_cmp_neq_f32_e32 vcc, 0
+; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc{{$}}
+; GFX1064: s_and_b64 exec, exec, [[WQM]]
+define amdgpu_ps void @test_wqm_vote(float %a) {
+  %c1 = fcmp une float %a, 0.0
+  %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
+  call void @llvm.amdgcn.kill(i1 %c2)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_branch_true:
+; GFX1032: s_and_b32 vcc_lo, exec_lo, -1
+; GFX1064: s_and_b64 vcc, exec, -1
+define amdgpu_kernel void @test_branch_true() #2 {
+entry:
+  br i1 true, label %for.end, label %for.body.lr.ph
+
+for.body.lr.ph:                                   ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %for.body.lr.ph
+  br i1 undef, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_ps_live:
+; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo
+; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}}
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
+define amdgpu_ps float @test_ps_live() #0 {
+  %live = call i1 @llvm.amdgcn.ps.live()
+  %live.32 = zext i1 %live to i32
+  %r = bitcast i32 %live.32 to float
+  ret float %r
+}
+
+; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
+; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0
+; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]]
+; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0
+; GFX1064: s_and_b64 vcc, exec, [[C]]
+define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
+entry:
+  %v = load double, double addrspace(1)* %in
+  %cc = fcmp oeq double %v, 1.000000e+00
+  br i1 %cc, label %if, label %endif
+
+if:
+  %u = fadd double %v, %v
+  br label %endif
+
+endif:
+  %r = phi double [ %v, %entry ], [ %u, %if ]
+  store double %r, double addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_init_exec:
+; GFX1032: s_mov_b32 exec_lo, 0x12345
+; GFX1064: s_mov_b64 exec, 0x12345
+; GCN: v_add_f32_e32 v0,
+define amdgpu_ps float @test_init_exec(float %a, float %b) {
+main_body:
+  %s = fadd float %a, %b
+  call void @llvm.amdgcn.init.exec(i64 74565)
+  ret float %s
+}
+
+; GCN-LABEL: {{^}}test_init_exec_from_input:
+; GCN: s_bfe_u32 s0, s3, 0x70008
+; GFX1032: s_bfm_b32 exec_lo, s0, 0
+; GFX1032: s_cmp_eq_u32 s0, 32
+; GFX1032: s_cmov_b32 exec_lo, -1
+; GFX1064: s_bfm_b64 exec, s0, 0
+; GFX1064: s_cmp_eq_u32 s0, 64
+; GFX1064: s_cmov_b64 exec, -1
+; GCN: v_add_f32_e32 v0,
+define amdgpu_ps float @test_init_exec_from_input(i32 inreg, i32 inreg, i32 inreg, i32 inreg %count, float %a, float %b) {
+main_body:
+  %s = fadd float %a, %b
+  call void @llvm.amdgcn.init.exec.from.input(i32 %count, i32 8)
+  ret float %s
+}
+
+; GCN-LABEL: {{^}}test_vgprblocks_w32_attr:
+; Test that the wave size can be overridden by function attributes and that the VGPR block size is correct as a result.
+; GFX10DEFWAVE: ; VGPRBlocks: 1
+define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e,
+                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 {
+main_body:
+  %s = fadd float %a, %b
+  %s.1 = fadd float %s, %c
+  %s.2 = fadd float %s.1, %d
+  %s.3 = fadd float %s.2, %e
+  %s.4 = fadd float %s.3, %f
+  %s.5 = fadd float %s.4, %g
+  %s.6 = fadd float %s.5, %h
+  %s.7 = fadd float %s.6, %i
+  %s.8 = fadd float %s.7, %j
+  %s.9 = fadd float %s.8, %k
+  %s.10 = fadd float %s.9, %l
+  ret float %s.10
+}
+
+; GCN-LABEL: {{^}}test_vgprblocks_w64_attr:
+; Test that the wave size can be overridden by function attributes and that the VGPR block size is correct as a result.
+; GFX10DEFWAVE: ; VGPRBlocks: 2
+define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e,
+                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 {
+main_body:
+  %s = fadd float %a, %b
+  %s.1 = fadd float %s, %c
+  %s.2 = fadd float %s.1, %d
+  %s.3 = fadd float %s.2, %e
+  %s.4 = fadd float %s.3, %f
+  %s.5 = fadd float %s.4, %g
+  %s.6 = fadd float %s.5, %h
+  %s.7 = fadd float %s.6, %i
+  %s.8 = fadd float %s.7, %j
+  %s.9 = fadd float %s.8, %k
+  %s.10 = fadd float %s.9, %l
+  ret float %s.10
+}
+
+; GCN-LABEL: {{^}}icmp64:
+; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
+; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
+define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
+entry:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %mul4 = mul nsw i32 %s, %n
+  %cmp = icmp slt i32 0, %mul4
+  br label %if.end
+
+if.end:                                           ; preds = %entry
+  %rem = urem i32 %id, %s
+  %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32)
+  %shr = lshr i64 %icmp, 1
+  %notmask = shl nsw i64 -1, 0
+  %and = and i64 %notmask, %shr
+  %or = or i64 %and, -9223372036854775808
+  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
+  %cast = trunc i64 %cttz to i32
+  %cmp3 = icmp ugt i32 10, %cast
+  %cmp6 = icmp ne i32 %rem, 0
+  %brmerge = or i1 %cmp6, %cmp3
+  br i1 %brmerge, label %if.end2, label %if.then
+
+if.then:                                          ; preds = %if.end
+  unreachable
+
+if.end2:                                          ; preds = %if.end
+  ret void
+}
+
+; GCN-LABEL: {{^}}fcmp64:
+; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
+; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
+define amdgpu_kernel void @fcmp64(float %n, float %s) {
+entry:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.f = uitofp i32 %id to float
+  %mul4 = fmul float %s, %n
+  %cmp = fcmp ult float 0.0, %mul4
+  br label %if.end
+
+if.end:                                           ; preds = %entry
+  %rem.f = frem float %id.f, %s
+  %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1)
+  %shr = lshr i64 %fcmp, 1
+  %notmask = shl nsw i64 -1, 0
+  %and = and i64 %notmask, %shr
+  %or = or i64 %and, -9223372036854775808
+  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
+  %cast = trunc i64 %cttz to i32
+  %cmp3 = icmp ugt i32 10, %cast
+  %cmp6 = fcmp one float %rem.f, 0.0
+  %brmerge = or i1 %cmp6, %cmp3
+  br i1 %brmerge, label %if.end2, label %if.then
+
+if.then:                                          ; preds = %if.end
+  unreachable
+
+if.end2:                                          ; preds = %if.end
+  ret void
+}
+
+; GCN-LABEL: {{^}}icmp32:
+; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
+; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
+define amdgpu_kernel void @icmp32(i32 %n, i32 %s) {
+entry:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %mul4 = mul nsw i32 %s, %n
+  %cmp = icmp slt i32 0, %mul4
+  br label %if.end
+
+if.end:                                           ; preds = %entry
+  %rem = urem i32 %id, %s
+  %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32)
+  %shr = lshr i32 %icmp, 1
+  %notmask = shl nsw i32 -1, 0
+  %and = and i32 %notmask, %shr
+  %or = or i32 %and, 2147483648
+  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
+  %cmp3 = icmp ugt i32 10, %cttz
+  %cmp6 = icmp ne i32 %rem, 0
+  %brmerge = or i1 %cmp6, %cmp3
+  br i1 %brmerge, label %if.end2, label %if.then
+
+if.then:                                          ; preds = %if.end
+  unreachable
+
+if.end2:                                          ; preds = %if.end
+  ret void
+}
+
+; GCN-LABEL: {{^}}fcmp32:
+; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
+; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
+define amdgpu_kernel void @fcmp32(float %n, float %s) {
+entry:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.f = uitofp i32 %id to float
+  %mul4 = fmul float %s, %n
+  %cmp = fcmp ult float 0.0, %mul4
+  br label %if.end
+
+if.end:                                           ; preds = %entry
+  %rem.f = frem float %id.f, %s
+  %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1)
+  %shr = lshr i32 %fcmp, 1
+  %notmask = shl nsw i32 -1, 0
+  %and = and i32 %notmask, %shr
+  %or = or i32 %and, 2147483648
+  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
+  %cmp3 = icmp ugt i32 10, %cttz
+  %cmp6 = fcmp one float %rem.f, 0.0
+  %brmerge = or i1 %cmp6, %cmp3
+  br i1 %brmerge, label %if.end2, label %if.then
+
+if.then:                                          ; preds = %if.end
+  unreachable
+
+if.end2:                                          ; preds = %if.end
+  ret void
+}
+
+declare void @external_void_func_void() #1
+
+; Test save/restore of VGPR needed for SGPR spilling.
+
+; GCN-LABEL: {{^}}callee_no_stack_with_call:
+; GCN: s_waitcnt
+; GCN: s_mov_b32 s5, s32
+; GFX1064: s_add_u32 s32, s32, 0x400
+; GFX1032: s_add_u32 s32, s32, 0x200
+
+; GFX1064: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}}
+; GFX1032: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]+]], -1{{$}}
+
+; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill
+
+; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
+; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]]
+
+; GCN-DAG: v_writelane_b32 v32, s33, 0
+; GCN-DAG: v_writelane_b32 v32, s34, 1
+; GCN-DAG: s_mov_b32 s33, s5
+; GCN: s_swappc_b64
+; GCN-DAG: s_mov_b32 s5, s33
+; GCN-DAG: v_readlane_b32 s34, v32, 1
+; GCN-DAG: v_readlane_b32 s33, v32, 0
+
+; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
+; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]+]], -1{{$}}
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload
+; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
+; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]]
+
+; GFX1064: s_sub_u32 s32, s32, 0x400
+; GFX1032: s_sub_u32 s32, s32, 0x200
+; GCN: s_setpc_b64
+define void @callee_no_stack_with_call() #1 {
+  call void @external_void_func_void()
+  ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare float @llvm.fabs.f32(float)
+declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)
+declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1)
+declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
+declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1)
+declare i1 @llvm.amdgcn.class.f32(float, i32)
+declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
+declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
+declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)
+declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
+declare float @llvm.amdgcn.wwm.f32(float)
+declare i32 @llvm.amdgcn.wqm.i32(i32)
+declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32)
+declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32)
+declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1)
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
+declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
+declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32)
+declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32)
+declare void @llvm.amdgcn.kill(i1)
+declare i1 @llvm.amdgcn.wqm.vote(i1)
+declare i1 @llvm.amdgcn.ps.live()
+declare void @llvm.amdgcn.init.exec(i64)
+declare void @llvm.amdgcn.init.exec.from.input(i32, i32)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+
+attributes #0 = { nounwind readnone speculatable }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readnone optnone noinline }
+attributes #3 = { "target-features"="+wavefrontsize32" }
+attributes #4 = { "target-features"="+wavefrontsize64" }

Modified: llvm/trunk/test/CodeGen/AMDGPU/xor3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/xor3.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/xor3.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/xor3.ll Thu Jun 20 08:08:34 2019
@@ -16,6 +16,7 @@ define amdgpu_ps float @xor3(i32 %a, i32
 ; GFX10-LABEL: xor3:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = xor i32 %x, %c
@@ -33,6 +34,7 @@ define amdgpu_ps float @xor3_vgpr_b(i32
 ; GFX10-LABEL: xor3_vgpr_b:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, s2, v0, s3
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = xor i32 %x, %c
@@ -50,6 +52,7 @@ define amdgpu_ps float @xor3_vgpr_all2(i
 ; GFX10-LABEL: xor3_vgpr_all2:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, v1, v2, v0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %b, %c
   %result = xor i32 %a, %x
@@ -67,6 +70,7 @@ define amdgpu_ps float @xor3_vgpr_bc(i32
 ; GFX10-LABEL: xor3_vgpr_bc:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, s2, v0, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = xor i32 %x, %c
@@ -84,6 +88,7 @@ define amdgpu_ps float @xor3_vgpr_const(
 ; GFX10-LABEL: xor3_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, v0, v1, 16
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = xor i32 %x, 16
@@ -102,6 +107,7 @@ define amdgpu_ps <2 x float> @xor3_multi
 ; GFX10-LABEL: xor3_multiuse_outer:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor3_b32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_mul_lo_u32 v1, v0, v3
 ; GFX10-NEXT:    ; return to shader part epilog
   %inner = xor i32 %a, %b
@@ -123,6 +129,7 @@ define amdgpu_ps <2 x float> @xor3_multi
 ; GFX10-LABEL: xor3_multiuse_inner:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xor_b32_e32 v0, v0, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_xor_b32_e32 v1, v0, v2
 ; GFX10-NEXT:    ; return to shader part epilog
   %inner = xor i32 %a, %b
@@ -151,6 +158,7 @@ define amdgpu_ps float @xor3_uniform_vgp
 ; GFX10-NEXT:    v_add_f32_e64 v1, s3, 2.0
 ; GFX10-NEXT:    v_add_f32_e64 v2, s2, 1.0
 ; GFX10-NEXT:    v_add_f32_e64 v0, 0x40400000, s4
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    v_xor_b32_e32 v1, v2, v1
 ; GFX10-NEXT:    v_xor_b32_e32 v0, v1, v0
 ; GFX10-NEXT:    ; return to shader part epilog

Modified: llvm/trunk/test/CodeGen/AMDGPU/xor_add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/xor_add.ll?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/xor_add.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/xor_add.ll Thu Jun 20 08:08:34 2019
@@ -22,6 +22,7 @@ define amdgpu_ps float @xor_add(i32 %a,
 ; GFX10-LABEL: xor_add:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xad_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = add i32 %x, %c
@@ -46,6 +47,7 @@ define amdgpu_ps float @xor_add_vgpr_a(i
 ; GFX10-LABEL: xor_add_vgpr_a:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xad_u32 v0, v0, s2, s3
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = add i32 %x, %c
@@ -68,6 +70,7 @@ define amdgpu_ps float @xor_add_vgpr_all
 ; GFX10-LABEL: xor_add_vgpr_all:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xad_u32 v0, v0, v1, v2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = add i32 %x, %c
@@ -90,6 +93,7 @@ define amdgpu_ps float @xor_add_vgpr_ab(
 ; GFX10-LABEL: xor_add_vgpr_ab:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xad_u32 v0, v0, v1, s2
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, %b
   %result = add i32 %x, %c
@@ -112,6 +116,7 @@ define amdgpu_ps float @xor_add_vgpr_con
 ; GFX10-LABEL: xor_add_vgpr_const:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    v_xad_u32 v0, v0, 3, v1
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
 ; GFX10-NEXT:    ; return to shader part epilog
   %x = xor i32 %a, 3
   %result = add i32 %x, %b

Modified: llvm/trunk/test/MC/AMDGPU/gfx10-constant-bus.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/AMDGPU/gfx10-constant-bus.s?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/MC/AMDGPU/gfx10-constant-bus.s (original)
+++ llvm/trunk/test/MC/AMDGPU/gfx10-constant-bus.s Thu Jun 20 08:08:34 2019
@@ -33,3 +33,13 @@ v_div_fmas_f64 v[5:6], v[1:2], s[2:3], v
 
 v_div_fmas_f64 v[5:6], v[1:2], s[2:3], 0x123456
 // GFX10-ERR: error: invalid operand (violates constant bus restrictions)
+
+//-----------------------------------------------------------------------------------------
+// v_mad_u64_u32 has operands of different sizes.
+// When these operands are literals, they are counted as two scalar values even if the literals are identical.
+
+v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678
+// GFX10: v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678 ; encoding: [0x05,0x0c,0x76,0xd5,0x01,0xff,0xfd,0x03,0x78,0x56,0x34,0x12]
+
+v_mad_u64_u32 v[5:6], s12, s1, 0x12345678, 0x12345678
+// GFX10-ERR: error: invalid operand (violates constant bus restrictions)

Added: llvm/trunk/test/MC/AMDGPU/wave32.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/AMDGPU/wave32.s?rev=363934&view=auto
==============================================================================
--- llvm/trunk/test/MC/AMDGPU/wave32.s (added)
+++ llvm/trunk/test/MC/AMDGPU/wave32.s Thu Jun 20 08:08:34 2019
@@ -0,0 +1,412 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1032 %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1064 %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1032-ERR %s
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1064-ERR %s
+
+v_cmp_ge_i32_e32 s0, v0
+// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d]
+// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d]
+
+v_cmp_ge_i32_e32 vcc_lo, s0, v1
+// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v1 ; encoding: [0x00,0x02,0x0c,0x7d]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_cmp_ge_i32_e32 vcc, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v2 ; encoding: [0x00,0x04,0x0c,0x7d]
+
+v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD
+// GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06]
+
+v_cmp_class_f32_e32 vcc_lo, s0, v0
+// GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_cmp_class_f32_e32 vcc, s0, v0
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_cmp_class_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d]
+
+// TODO-GFX10: The following encoding does not match SP3's encoding, which is:
+//  [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06]
+v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06]
+// GFX1064-ERR: error: invalid operand for instruction
+
+// TODO-GFX10: The following encoding does not match SP3's encoding, which is:
+//  [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06]
+v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06]
+
+v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD
+// GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06]
+
+v_cndmask_b32_e32 v1, v2, v3,
+// GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02]
+// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02]
+
+v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+// GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_cndmask_b32_e32 v1, v2, v3, vcc
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02]
+
+v_add_co_u32_e32 v2, vcc_lo, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_u32_e32 v2, vcc, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo
+// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50]
+
+v_add_co_ci_u32_e32 v3, v3, v4
+// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50]
+// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50]
+
+v_sub_co_u32_e32 v2, vcc_lo, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_sub_co_u32_e32 v2, vcc, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_subrev_co_u32_e32 v2, vcc_lo, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_subrev_co_u32_e32 v2, vcc, s0, v2
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo
+// GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52]
+
+v_sub_co_ci_u32_e32 v3, v3, v4
+// GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52]
+// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52]
+
+v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; encoding: [0x80,0x02,0x02,0x54]
+
+v_subrev_co_ci_u32_e32 v1, 0, v1
+// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54]
+// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; encoding: [0x80,0x02,0x02,0x54]
+
+v_add_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: invalid operand
+// GFX1064-ERR: error: invalid operand
+
+v_add_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: instruction not supported
+// GFX1064-ERR: error: instruction not supported
+
+v_add_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06]
+
+v_add_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06]
+// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06]
+
+v_sub_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: invalid operand
+// GFX1064-ERR: error: invalid operand
+
+v_sub_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: instruction not supported
+// GFX1064-ERR: error: instruction not supported
+
+v_sub_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_subrev_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: invalid operand
+// GFX1064-ERR: error: invalid operand
+
+v_subrev_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: instruction not supported
+// GFX1064-ERR: error: instruction not supported
+
+v_subrev_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06]
+
+v_sub_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06]
+// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06]
+
+v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06]
+
+v_subrev_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06]
+// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06]
+
+v_add_co_ci_u32 v1, sext(v1), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e]
+// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e]
+
+v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e]
+
+v_add_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_add_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_add_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_add_co_ci_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00]
+// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00]
+
+v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00]
+
+v_sub_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_sub_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_sub_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00]
+
+v_subrev_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_subrev_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_subrev_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: not a valid operand
+// GFX1064-ERR: error: not a valid operand
+
+v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00]
+// GFX1064-ERR: error: instruction not supported on this GPU
+
+v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+// GFX1032-ERR: error: instruction not supported on this GPU
+// GFX1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00]
+
+v_add_co_u32 v0, s0, v0, v2
+// GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_add_co_u32_e64 v0, s0, v0, v2
+// GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_add_co_ci_u32_e64 v4, s0, v1, v5, s2
+// GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_sub_co_u32 v0, s0, v0, v2
+// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_sub_co_u32_e64 v0, s0, v0, v2
+// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2
+// GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_subrev_co_u32 v0, s0, v0, v2
+// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_subrev_co_u32_e64 v0, s0, v0, v2
+// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2
+// GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_add_co_u32 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00]
+
+v_add_co_u32_e64 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00]
+
+v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00]
+
+v_sub_co_u32 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00]
+
+v_sub_co_u32_e64 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00]
+
+v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00]
+
+v_subrev_co_u32 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00]
+
+v_subrev_co_u32_e64 v0, s[0:1], v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00]
+
+v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00]
+
+v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2
+// GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00]
+
+v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo
+// GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01]
+
+v_div_scale_f32 v2, s2, v0, v0, v2
+// GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_div_scale_f32 v2, s[2:3], v0, v0, v2
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04]
+
+v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3]
+// GFX1032: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04]
+
+v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3]
+// GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04]
+
+v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3]
+// GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04]
+// GFX1064-ERR: error: invalid operand for instruction
+
+v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+// GFX1032-ERR: error: invalid operand for instruction
+// GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04]
+
+v_cmpx_neq_f32_e32 v0, v1
+// GFX1032: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c]
+// GFX1064: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c]
+
+v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+// GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06]
+// GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06]
+
+v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD
+// GFX1032: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86]
+// GFX1064: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86]
+
+v_cmpx_class_f32_e64 v0, 1
+// GFX1032: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00]
+// GFX1064: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00]
+
+v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD
+// GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86]
+// GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86]

Modified: llvm/trunk/test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt?rev=363934&r1=363933&r2=363934&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt (original)
+++ llvm/trunk/test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt Thu Jun 20 08:08:34 2019
@@ -8,3 +8,9 @@
 
 # GFX10: s_mov_b32 s105, s104 ; encoding: [0x68,0x03,0xe9,0xbe]
 0x68,0x03,0xe9,0xbe
+
+# GFX10: v_cmp_eq_f32_e64 s105, v0, s105
+0x69,0x00,0x02,0xd4,0x00,0xd3,0x00,0x00
+
+# GFX10: v_cmp_eq_f32_sdwa s105, v0, s105 src0_sel:DWORD src1_sel:DWORD
+0xf9,0xd2,0x04,0x7c,0x00,0xe9,0x06,0x86

Added: llvm/trunk/test/MC/Disassembler/AMDGPU/wave32.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/AMDGPU/wave32.txt?rev=363934&view=auto
==============================================================================
--- llvm/trunk/test/MC/Disassembler/AMDGPU/wave32.txt (added)
+++ llvm/trunk/test/MC/Disassembler/AMDGPU/wave32.txt Thu Jun 20 08:08:34 2019
@@ -0,0 +1,164 @@
+# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1032 %s
+# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64,-wavefrontsize32 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1064 %s
+
+# GFX1032:   v_cmp_lt_f32_e32 vcc_lo, s2, v4
+# GFX1064:   v_cmp_lt_f32_e32 vcc, s2, v4
+0x02,0x08,0x02,0x7c
+
+# GFX1032:   v_cmp_ge_i32_e64 s2, s0, v2
+# GFX1064:   v_cmp_ge_i32_e64 s[2:3], s0, v2
+0x02,0x00,0x86,0xd4,0x00,0x04,0x02,0x00
+
+# GFX1032: v_cmp_ge_i32_sdwa vcc_lo, v0, v2 src0_sel:WORD_1 src1_sel:DWORD
+# GFX1064: v_cmp_ge_i32_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:DWORD
+0xf9,0x04,0x0c,0x7d,0x00,0x00,0x05,0x06
+
+# GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD
+# GFX1064: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD
+0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06
+
+# GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0
+# GFX1064: v_cmp_class_f32_e32 vcc, s0, v0
+0x00,0x00,0x10,0x7d
+
+# GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD
+# GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD
+0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06
+
+# GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD
+# GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD
+0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06
+
+# GFX1032: v_cndmask_b32_e32 v5, 0, v2, vcc_lo
+# GFX1064: v_cndmask_b32_e32 v5, 0, v2, vcc ;
+0x80,0x04,0x0a,0x02
+
+# GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+# GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ;
+0x02,0x07,0x02,0x02
+
+# GFX1032: v_add_co_u32_e64 v2, vcc_lo, s0, v2
+# GFX1064: v_add_co_u32_e64 v2, vcc, s0, v2
+0x02,0x6a,0x0f,0xd7,0x00,0x04,0x02,0x00
+
+# GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo
+# GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ;
+0x03,0x09,0x06,0x50
+
+# GFX1032: v_sub_co_u32_e64 v2, vcc_lo, s0, v2
+# GFX1064: v_sub_co_u32_e64 v2, vcc, s0, v2
+0x02,0x6a,0x10,0xd7,0x00,0x04,0x02,0x00
+
+# GFX1032: v_subrev_co_u32_e64 v2, vcc_lo, s0, v2
+# GFX1064: v_subrev_co_u32_e64 v2, vcc, s0, v2
+0x02,0x6a,0x19,0xd7,0x00,0x04,0x02,0x00
+
+# GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo
+# GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ;
+0x03,0x09,0x06,0x52
+
+# GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+# GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ;
+0x80,0x02,0x02,0x54
+
+# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+# GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06
+
+# GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+# GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06
+
+# GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+# GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06
+
+# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+# GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc  dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e
+
+# GFX1032: v_add_nc_u32_dpp v5, v1, v2  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# GFX1064: v_add_nc_u32_dpp v5, v1, v2  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+0xfa,0x04,0x0a,0x4a,0x01,0xe4,0x00,0x00
+
+# FIXME: Results in invalid v_subrev_u16_dpp which apparently has the same encoding but does not exist in GFX10
+
+# gfx1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# gfx1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# 0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00
+
+# FIXME: Results in v_mul_lo_u16_dpp
+
+# gfx1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# gfx1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# 0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00
+
+# FIXME: gives v_lshlrev_b16_dpp
+
+# gfx1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# gfx1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc  quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0
+# 0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00
+
+# GFX1032: v_add_co_u32_e64 v0, s0, v0, v2
+# GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2
+0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00
+
+# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2
+# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00
+
+# GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2
+# GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2
+0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00
+
+# GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2
+# GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00
+
+# GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2
+# GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2
+0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00
+
+# GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2
+# GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3]
+0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00
+
+# GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2
+# GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3]
+0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00
+
+# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo
+# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ;
+0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01
+
+# GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2
+# GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2
+0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04
+
+# GFX1032: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3]
+# GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3]
+0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04
+
+# GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3]
+# GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04
+
+# GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3]
+# GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04
+
+# GFX1032: v_cmpx_neq_f32_e32 v0, v1
+# GFX1064: v_cmpx_neq_f32_e32 v0, v1
+0x00,0x03,0x3a,0x7c
+
+# GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+# GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06
+
+# GFX1032: v_cmpx_class_f32_e64 v0, 1
+# GFX1064: v_cmpx_class_f32_e64 v0, 1
+0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00
+
+# GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD
+# GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD
+0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86
