[llvm] r296372 - AMDGPU: Fold omod into instructions

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 27 11:35:43 PST 2017


Author: arsenm
Date: Mon Feb 27 13:35:42 2017
New Revision: 296372

URL: http://llvm.org/viewvc/llvm-project?rev=296372&view=rev
Log:
AMDGPU: Fold omod into instructions

Added:
    llvm/trunk/test/CodeGen/AMDGPU/omod.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h
    llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
    llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp?rev=296372&r1=296371&r2=296372&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp Mon Feb 27 13:35:42 2017
@@ -20,7 +20,8 @@ AMDGPUMachineFunction::AMDGPUMachineFunc
   LDSSize(0),
   ABIArgOffset(0),
   IsKernel(MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_KERNEL ||
-           MF.getFunction()->getCallingConv() == CallingConv::SPIR_KERNEL) {
+           MF.getFunction()->getCallingConv() == CallingConv::SPIR_KERNEL),
+  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
   // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
   // except reserved size is not correctly aligned.
 }
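
The cached flag mirrors the "no-signed-zeros-fp-math" setting of the function being compiled; the new tests drive it through the function attribute. A minimal sketch (illustrative only; the function and value names are not from the patch) of IR for which the flag would be true and the omod fold would therefore be considered:

    ; Hypothetical example module, not part of the patch.
    define amdgpu_ps void @nsz_enabled(float %a) #0 {
      %add = fadd float %a, 1.0      ; selects to V_ADD_F32_e64
      %div2 = fmul float %add, 0.5   ; candidate for the div:2 output modifier
      store float %div2, float addrspace(1)* undef
      ret void
    }
    attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }

With "no-signed-zeros-fp-math"="false" (as in attribute group #4 of the new omod.ll test) the flag is false and the fold stays disabled even when the IEEE bit is off.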

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h?rev=296372&r1=296371&r2=296372&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h Mon Feb 27 13:35:42 2017
@@ -31,6 +31,7 @@ class AMDGPUMachineFunction : public Mac
   unsigned ABIArgOffset;
 
   bool IsKernel;
+  bool NoSignedZerosFPMath;
 
 public:
   AMDGPUMachineFunction(const MachineFunction &MF);
@@ -70,6 +71,10 @@ public:
     return IsKernel;
   }
 
+  bool hasNoSignedZerosFPMath() const {
+    return NoSignedZerosFPMath;
+  }
+
   unsigned allocateLDSGlobal(const DataLayout &DL, const GlobalValue &GV);
 };
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?rev=296372&r1=296371&r2=296372&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp Mon Feb 27 13:35:42 2017
@@ -12,6 +12,7 @@
 #include "AMDGPU.h"
 #include "AMDGPUSubtarget.h"
 #include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -79,6 +80,9 @@ public:
   const MachineOperand *isClamp(const MachineInstr &MI) const;
   bool tryFoldClamp(MachineInstr &MI);
 
+  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
+  bool tryFoldOMod(MachineInstr &MI);
+
 public:
   SIFoldOperands() : MachineFunctionPass(ID) {
     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
@@ -135,7 +139,7 @@ FunctionPass *llvm::createSIFoldOperands
   return new SIFoldOperands();
 }
 
-static bool isSafeToFold(const MachineInstr &MI) {
+static bool isFoldableCopy(const MachineInstr &MI) {
   switch (MI.getOpcode()) {
   case AMDGPU::V_MOV_B32_e32:
   case AMDGPU::V_MOV_B32_e64:
@@ -731,7 +735,6 @@ static bool hasOneNonDBGUseInst(const Ma
   return true;
 }
 
-// FIXME: Does this need to check IEEE bit on function?
 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
   const MachineOperand *ClampSrc = isClamp(MI);
   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
@@ -753,6 +756,128 @@ bool SIFoldOperands::tryFoldClamp(Machin
   return true;
 }
 
+static int getOModValue(unsigned Opc, int64_t Val) {
+  switch (Opc) {
+  case AMDGPU::V_MUL_F32_e64: {
+    switch (static_cast<uint32_t>(Val)) {
+    case 0x3f000000: // 0.5
+      return SIOutMods::DIV2;
+    case 0x40000000: // 2.0
+      return SIOutMods::MUL2;
+    case 0x40800000: // 4.0
+      return SIOutMods::MUL4;
+    default:
+      return SIOutMods::NONE;
+    }
+  }
+  case AMDGPU::V_MUL_F16_e64: {
+    switch (static_cast<uint16_t>(Val)) {
+    case 0x3800: // 0.5
+      return SIOutMods::DIV2;
+    case 0x4000: // 2.0
+      return SIOutMods::MUL2;
+    case 0x4400: // 4.0
+      return SIOutMods::MUL4;
+    default:
+      return SIOutMods::NONE;
+    }
+  }
+  default:
+    llvm_unreachable("invalid mul opcode");
+  }
+}
+
+// FIXME: Does this really not support denormals with f16?
+// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
+// handled, so will anything other than that break?
+std::pair<const MachineOperand *, int>
+SIFoldOperands::isOMod(const MachineInstr &MI) const {
+  unsigned Op = MI.getOpcode();
+  switch (Op) {
+  case AMDGPU::V_MUL_F32_e64:
+  case AMDGPU::V_MUL_F16_e64: {
+    // If output denormals are enabled, omod is ignored.
+    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
+        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    const MachineOperand *RegOp = nullptr;
+    const MachineOperand *ImmOp = nullptr;
+    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+    if (Src0->isImm()) {
+      ImmOp = Src0;
+      RegOp = Src1;
+    } else if (Src1->isImm()) {
+      ImmOp = Src1;
+      RegOp = Src0;
+    } else
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    int OMod = getOModValue(Op, ImmOp->getImm());
+    if (OMod == SIOutMods::NONE ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    return std::make_pair(RegOp, OMod);
+  }
+  case AMDGPU::V_ADD_F32_e64:
+  case AMDGPU::V_ADD_F16_e64: {
+    // If output denormals are enabled, omod is ignored.
+    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
+        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
+    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+
+    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
+        Src0->getSubReg() == Src1->getSubReg() &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
+      return std::make_pair(Src0, SIOutMods::MUL2);
+
+    return std::make_pair(nullptr, SIOutMods::NONE);
+  }
+  default:
+    return std::make_pair(nullptr, SIOutMods::NONE);
+  }
+}
+
+// FIXME: Does this need to check IEEE bit on function?
+bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
+  const MachineOperand *RegOp;
+  int OMod;
+  std::tie(RegOp, OMod) = isOMod(MI);
+  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
+      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
+      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
+    return false;
+
+  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
+  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
+  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
+    return false;
+
+  // Clamp is applied after omod. If the source already has clamp set, don't
+  // fold it.
+  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
+    return false;
+
+  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
+
+  DefOMod->setImm(OMod);
+  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
+  MI.eraseFromParent();
+  return true;
+}
+
 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
   if (skipFunction(*MF.getFunction()))
     return false;
@@ -762,6 +887,15 @@ bool SIFoldOperands::runOnMachineFunctio
   TII = ST->getInstrInfo();
   TRI = &TII->getRegisterInfo();
 
+  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
+  // correctly handle signed zeros.
+  //
+  // TODO: Check nsz on instructions when fast math flags are preserved to MI
+  // level.
+  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();
+
   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
        BI != BE; ++BI) {
 
@@ -771,9 +905,9 @@ bool SIFoldOperands::runOnMachineFunctio
       Next = std::next(I);
       MachineInstr &MI = *I;
 
-      if (!isSafeToFold(MI)) {
-        // TODO: Try omod also.
-        tryFoldClamp(MI);
+      if (!isFoldableCopy(MI)) {
+        if (IsIEEEMode || !tryFoldOMod(MI))
+          tryFoldClamp(MI);
         continue;
       }
 

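Taken together, the new code lets the pass fold a multiply by 0.5, 2.0, or 4.0 (or the canonicalized fadd x, x form) of a single-use VOP3 result into that instruction's output-modifier field, provided omod is actually honored: IEEE mode off, signed zeros ignorable, output denormals disabled, and no modifiers already set on either instruction. A rough before/after sketch in the spirit of the v_omod_div2_f32 case added below (names are illustrative and the exact registers in the final ISA will vary):

    ; Hypothetical input, mirroring @v_omod_div2_f32 in the new omod.ll test.
    define amdgpu_ps void @omod_div2_sketch(float %a) #0 {
      %add = fadd float %a, 1.0      ; selected as V_ADD_F32_e64 with omod = NONE
      %div2 = fmul float %add, 0.5   ; only use of %add; matches SIOutMods::DIV2
      store float %div2, float addrspace(1)* undef
      ret void
    }
    attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }

    ; After si-fold-operands the V_MUL is deleted and its omod value is written
    ; into the defining add, so the final ISA is a single instruction roughly of
    ; the form:
    ;   v_add_f32_e64 v1, v0, 1.0 div:2
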
Modified: llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir?rev=296372&r1=296371&r2=296372&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir Mon Feb 27 13:35:42 2017
@@ -1,13 +1,31 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands  %s -o - | FileCheck -check-prefix=GCN %s
 --- |
-  define amdgpu_kernel void @v_max_self_clamp_not_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+  define amdgpu_ps void @v_max_self_clamp_not_set_f32() #0 {
     ret void
   }
 
-  define amdgpu_kernel void @v_clamp_omod_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+  define amdgpu_ps void @v_clamp_omod_already_set_f32() #0 {
     ret void
   }
 
+  define amdgpu_ps void @v_omod_mul_omod_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_mul_clamp_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_add_omod_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_add_clamp_already_set_f32() #0 {
+    ret void
+  }
+
+  attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
+
 ...
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
@@ -133,3 +151,274 @@ body:             |
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
     S_ENDPGM
 ...
+---
+# Don't fold a mul that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_mul_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+name:            v_omod_mul_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold a mul that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+
+name:            v_omod_mul_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+
+---
+# Don't fold an add that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_add_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+name:            v_omod_add_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_add_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+
+name:            v_omod_add_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...

Added: llvm/trunk/test/CodeGen/AMDGPU/omod.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/omod.ll?rev=296372&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/omod.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/omod.ll Mon Feb 27 13:35:42 2017
@@ -0,0 +1,286 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; IEEE bit enabled for compute kernel, so omod should not be used.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_signed_zeros:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_signed_zeros(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; IEEE bit enabled for compute kernel, so omod should not be used even though nsz is allowed.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_nsz:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_nsz(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Even without the IEEE bit, don't fold when signed zeros are significant.
+; GCN-LABEL: {{^}}v_omod_div2_f32_signed_zeros:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_signed_zeros(float %a) #4 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:2{{$}}
+define amdgpu_ps void @v_omod_mul2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 2.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_multi_use_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 4.0, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul4_multi_use_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  store volatile float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_dbg_use_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_dbg_use_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; Clamp is applied after omod, so folding both into the instruction is OK.
+; GCN-LABEL: {{^}}v_clamp_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 clamp div:2{{$}}
+define amdgpu_ps void @v_clamp_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+
+  %max = call float @llvm.maxnum.f32(float %div2, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* undef
+  ret void
+}
+
+; Cannot fold omod into clamp
+; GCN-LABEL: {{^}}v_omod_div2_clamp_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 clamp{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_clamp_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %div2 = fmul float %clamp, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_abs_src_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ADD]]|, 0.5{{$}}
+define amdgpu_ps void @v_omod_div2_abs_src_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %abs.add = call float @llvm.fabs.f32(float %add)
+  %div2 = fmul float %abs.add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_self_clamp_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, v0 clamp{{$}}
+define amdgpu_ps void @v_omod_add_self_clamp_f32(float %a) #0 {
+  %add = fadd float %a, %a
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_clamp_self_f32:
+; GCN: v_max_f32_e64 [[CLAMP:v[0-9]+]], v0, v0 clamp{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[CLAMP]], [[CLAMP]]{{$}}
+define amdgpu_ps void @v_omod_add_clamp_self_f32(float %a) #0 {
+  %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %add = fadd float %clamp, %clamp
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_self_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_abs_self_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %abs.x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_x_x_f32:
+
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[X]]{{$}}
+define amdgpu_ps void @v_omod_add_abs_x_x_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_x_abs_x_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[X]], |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_x_abs_x_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %x, %abs.x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold one omod into another omod.
+; GCN-LABEL: {{^}}v_omod_div2_omod_div2_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2.0 = fmul float %add, 0.5
+  %div2.1 = fmul float %div2.0, 0.5
+  store float %div2.1, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_denormals(float %a) #2 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f32_denormals(float %a) #2 {
+  %add = fadd float %a, 1.0
+  %mul2 = fadd float %add, %add
+  store float %mul2, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_mul_f16_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f16_denormals(half %a) #0 {
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_add_f16_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f16_denormals(half %a) #0 {
+  %add = fadd half %a, 1.0
+  %mul2 = fadd half %add, %add
+  store half %mul2, half addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f16_no_denormals:
+; VI-NOT: v0
+; VI: v_add_f16_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f16_no_denormals(half %a) #3 {
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* undef
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="+fp32-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #3 = { nounwind "target-features"="-fp64-fp16-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #4 = { nounwind "no-signed-zeros-fp-math"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !8}
+!8 = !DIBasicType(name: "float", size: 32, align: 32)
+!9 = !DIExpression()
+!10 = !DILocation(line: 1, column: 42, scope: !5)




More information about the llvm-commits mailing list