[llvm] r291711 - AMDGPU: Fix folding immediates into mac src2

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 11 14:00:03 PST 2017


Author: arsenm
Date: Wed Jan 11 16:00:02 2017
New Revision: 291711

URL: http://llvm.org/viewvc/llvm-project?rev=291711&view=rev
Log:
AMDGPU: Fix folding immediates into mac src2

Whether the immediate is legal to fold needs to be checked against
the instruction the fold target will be replaced with.

Modified:
    llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
    llvm/trunk/test/CodeGen/AMDGPU/v_mac.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?rev=291711&r1=291710&r2=291711&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp Wed Jan 11 16:00:02 2017
@@ -99,6 +99,34 @@ char SIFoldOperands::ID = 0;
 
 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
 
+// Wrapper around isInlineConstant that understands special cases when
+// instruction types are replaced during operand folding.
+static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
+                                     const MachineInstr &UseMI,
+                                     unsigned OpNo,
+                                     const MachineOperand &OpToFold) {
+  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
+    return true;
+
+  unsigned Opc = UseMI.getOpcode();
+  switch (Opc) {
+  case AMDGPU::V_MAC_F32_e64:
+  case AMDGPU::V_MAC_F16_e64: {
+    // Special case for mac. Since this is replaced with mad when folded into
+    // src2, we need to check the legality for the final instruction.
+    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
+    if (static_cast<int>(OpNo) == Src2Idx) {
+      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
+      const MCInstrDesc &MadDesc
+        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
+      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
+    }
+  }
+  default:
+    return false;
+  }
+}
+
 FunctionPass *llvm::createSIFoldOperandsPass() {
   return new SIFoldOperands();
 }
@@ -171,7 +199,7 @@ static bool tryAddToFoldList(SmallVector
     unsigned Opc = MI->getOpcode();
     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
-      bool IsF32  = Opc == AMDGPU::V_MAC_F32_e64;
+      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
 
       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
       // to fold the operand.
@@ -611,7 +639,7 @@ void SIFoldOperands::foldInstOperand(Mac
       // Folding immediates with more than one use will increase program size.
       // FIXME: This will also reduce register usage, which may be better
       // in some cases. A better heuristic is needed.
-      if (TII->isInlineConstant(*UseMI, OpNo, OpToFold)) {
+      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
       } else {
         if (++NumLiteralUses == 1) {

Modified: llvm/trunk/test/CodeGen/AMDGPU/v_mac.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/v_mac.ll?rev=291711&r1=291710&r2=291711&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/v_mac.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/v_mac.ll Wed Jan 11 16:00:02 2017
@@ -212,5 +212,71 @@ entry:
   ret void
 }
 
+; Without special casing the inline constant check for v_mac_f32's
+; src2, this fails to fold the 1.0 into a mad.
+
+; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
+; GCN: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+define void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
+bb:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.a = getelementptr inbounds float, float addrspace(1)* %a, i64 %tid.ext
+  %gep.b = getelementptr inbounds float, float addrspace(1)* %b, i64 %tid.ext
+  %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+  %tmp = load volatile float, float addrspace(1)* %gep.a
+  %tmp1 = load volatile float, float addrspace(1)* %gep.b
+  %tmp2 = fadd float %tmp, %tmp
+  %tmp3 = fmul float %tmp2, 4.0
+  %tmp4 = fsub float 1.0, %tmp3
+  %tmp5 = fadd float %tmp4, %tmp1
+  %tmp6 = fadd float %tmp1, %tmp1
+  %tmp7 = fmul float %tmp6, %tmp
+  %tmp8 = fsub float 1.0, %tmp7
+  %tmp9 = fmul float %tmp8, 8.0
+  %tmp10 = fadd float %tmp5, %tmp9
+  store float %tmp10, float addrspace(1)* %gep.out
+  ret void
+}
+
+; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f16:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[B:v[0-9]+]]
+
+; FIXME: How is this not folded?
+; SI: v_cvt_f32_f16_e32 v{{[0-9]+}}, 0x3c00
+
+; VI: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
+; VI: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+define void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
+bb:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.out = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
+  %tmp = load volatile half, half addrspace(1)* %gep.a
+  %tmp1 = load volatile half, half addrspace(1)* %gep.b
+  %tmp2 = fadd half %tmp, %tmp
+  %tmp3 = fmul half %tmp2, 4.0
+  %tmp4 = fsub half 1.0, %tmp3
+  %tmp5 = fadd half %tmp4, %tmp1
+  %tmp6 = fadd half %tmp1, %tmp1
+  %tmp7 = fmul half %tmp6, %tmp
+  %tmp8 = fsub half 1.0, %tmp7
+  %tmp9 = fmul half %tmp8, 8.0
+  %tmp10 = fadd half %tmp5, %tmp9
+  store half %tmp10, half addrspace(1)* %gep.out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
 attributes #0 = { nounwind "unsafe-fp-math"="false" }
 attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }




More information about the llvm-commits mailing list