PATCH: R600/SI: Prevent illegal VGPR to SGPR copies

Tom Stellard tom at stellard.net
Fri Aug 2 13:08:03 PDT 2013


Hi,

The attached patches should get around most of the issues we've been
having with illegal VGPR to SGPR copies being emitted by the compiler.
There is now a pass, SIFixSGPRCopies, that we can extend with more
cases if we run into further problems; its core is sketched below.
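
Roughly, the new pass walks every PHI in the function, follows COPY
chains from the PHI's result to infer which register class its users
really need, and constrains VSrc_32 results to VReg_32 so the register
coalescer can no longer fold an SGPR value into a vector PHI.  Condensed
from the attached SIFixSGPRCopies.cpp:

  bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
    for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                    BI != BE; ++BI) {
      MachineBasicBlock &MBB = *BI;
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
                                                        I != E; ++I) {
        MachineInstr &MI = *I;
        if (MI.getOpcode() != AMDGPU::PHI)
          continue;

        // Walk COPY chains from the PHI result to see what register class
        // its users really need, then pin VSrc_32 results to VReg_32 so the
        // coalescer cannot turn this PHI into an SGPR definition fed by VGPRs.
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = inferRegClass(TRI, MRI, Reg);
        if (RC == &AMDGPU::VSrc_32RegClass)
          MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
      }
    }
    return false;
  }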

-Tom
-------------- next part --------------
From 9abfde81624308c0d281d06aed227ac6b062a18c Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Tue, 18 Jun 2013 15:27:23 -0700
Subject: [PATCH] R600/SI: Add support for v4i32 and v4f32 kernel args

---
 lib/Target/R600/AMDGPUCallingConv.td    |  9 +++++----
 test/CodeGen/R600/128bit-kernel-args.ll | 16 ++++++++++------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index 84e4f3a..826932b 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -38,10 +38,11 @@ def CC_SI : CallingConv<[
 
 // Calling convention for SI compute kernels
 def CC_SI_Kernel : CallingConv<[
-  CCIfType<[i64],      CCAssignToStack <8, 4>>,
-  CCIfType<[i32, f32], CCAssignToStack <4, 4>>,
-  CCIfType<[i16],      CCAssignToStack <2, 4>>,
-  CCIfType<[i8],       CCAssignToStack <1, 4>>
+  CCIfType<[v4i32, v4f32], CCAssignToStack <16, 4>>,
+  CCIfType<[i64],          CCAssignToStack < 8, 4>>,
+  CCIfType<[i32, f32],     CCAssignToStack < 4, 4>>,
+  CCIfType<[i16],          CCAssignToStack < 2, 4>>,
+  CCIfType<[i8],           CCAssignToStack < 1, 4>>
 ]>;
 
 def CC_AMDGPU : CallingConv<[
diff --git a/test/CodeGen/R600/128bit-kernel-args.ll b/test/CodeGen/R600/128bit-kernel-args.ll
index 114f9e7..bd60385 100644
--- a/test/CodeGen/R600/128bit-kernel-args.ll
+++ b/test/CodeGen/R600/128bit-kernel-args.ll
@@ -1,16 +1,20 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-; CHECK: @v4i32_kernel_arg
-; CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
+; R600-CHECK: @v4i32_kernel_arg
+; R600-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; SI-CHECK: @v4i32_kernel_arg
+; SI-CHECK: BUFFER_STORE_DWORDX4
 define void @v4i32_kernel_arg(<4 x i32> addrspace(1)* %out, <4 x i32>  %in) {
 entry:
   store <4 x i32> %in, <4 x i32> addrspace(1)* %out
   ret void
 }
 
-; CHECK: @v4f32_kernel_arg
-; CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; R600-CHECK: @v4f32_kernel_arg
+; R600-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; SI-CHECK: @v4f32_kernel_arg
+; SI-CHECK: BUFFER_STORE_DWORDX4
 define void @v4f32_kernel_args(<4 x float> addrspace(1)* %out, <4 x float>  %in) {
 entry:
   store <4 x float> %in, <4 x float> addrspace(1)* %out
-- 
1.7.11.4

-------------- next part --------------
From 643d57c07c0617f9e11d5fd7ff919d73a6f5649a Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Fri, 12 Jul 2013 21:24:05 -0700
Subject: [PATCH 2/2] R600/SI: Use VSrc_* register classes as the default
 classes for types

Since the VSrc_* register classes contain both VGPRs and SGPRs, copies
that used to be emitted by isel like this:

SGPR = COPY VGPR

will now be emitted like this:

VSrc = COPY VGPR

This patch also adds a pass that tries to identify and fix situations where
a VGPR to SGPR copy may occur.  Hopefully, these changes will make it
impossible for the compiler to generate illegal VGPR to SGPR copies.
---
 lib/Target/R600/AMDGPU.h                |   1 +
 lib/Target/R600/AMDGPUISelDAGToDAG.cpp  |  24 +----
 lib/Target/R600/AMDGPUTargetMachine.cpp |   2 +
 lib/Target/R600/SIFixSGPRCopies.cpp     | 152 ++++++++++++++++++++++++++++++++
 lib/Target/R600/SIISelLowering.cpp      |  28 ++----
 test/CodeGen/R600/sgpr-copy.ll          |  84 ++++++++++++++++++
 6 files changed, 247 insertions(+), 44 deletions(-)
 create mode 100644 lib/Target/R600/SIFixSGPRCopies.cpp
 create mode 100644 test/CodeGen/R600/sgpr-copy.ll

diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 51d0d3c..6b374cb 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -36,6 +36,7 @@ FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
 // SI Passes
 FunctionPass *createSIAnnotateControlFlowPass();
 FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
 FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
 FunctionPass *createSIInsertWaits(TargetMachine &tm);
 
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 38a5f24..f222901 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -302,7 +302,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
     } else if (N->getValueType(0) == MVT::i64) {
-      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
+      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
       SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
       SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
     } else {
@@ -816,28 +816,6 @@ void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
        E = CurDAG->allnodes_end(); I != E; ++I) {
 
     SDNode *Node = I;
-    switch (Node->getOpcode()) {
-    // Fix the register class in copy to CopyToReg nodes - ISel will always
-    // use SReg classes for 64-bit copies, but this is not always what we want.
-    case ISD::CopyToReg: {
-      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
-      SDValue Val = Node->getOperand(2);
-      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
-      if (RC != &AMDGPU::SReg_64RegClass) {
-        continue;
-      }
-
-      if (!Val.getNode()->isMachineOpcode() ||
-          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
-        continue;
-      }
-
-      const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
-      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
-      continue;
-    }
-    }
 
     MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
     if (!MachineNode)
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index 33e2dae..89c4641 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -146,6 +146,8 @@ bool AMDGPUPassConfig::addPreRegAlloc() {
 
   if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
     addPass(createR600VectorRegMerger(*TM));
+  } else {
+    addPass(createSIFixSGPRCopiesPass(*TM));
   }
   return false;
 }
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp
new file mode 100644
index 0000000..435172a
--- /dev/null
+++ b/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -0,0 +1,152 @@
+//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Copies from VGPR to SGPR registers are illegal and the register coalescer
+/// will sometimes generate these illegal copies in situations like this:
+///
+///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
+///
+/// BB0:
+///   %vreg0 <sgpr> = SCALAR_INST
+///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+///    ...
+///    BRANCH %cond BB1, BB2
+///  BB1:
+///    %vreg2 <vgpr> = VECTOR_INST
+///    %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+///  BB2:
+///    %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+///    %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+///
+/// 
+/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
+/// code will look like this:
+///
+/// BB0:
+///   %vreg0 <sgpr> = SCALAR_INST
+///    ...
+///    BRANCH %cond BB1, BB2
+/// BB1:
+///   %vreg2 <vgpr> = VECTOR_INST
+///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now that the result of the PHI instruction is an SGPR, the register
+/// allocator is forced to constrain the register class of %vreg3 to
+/// <sgpr>, so we end up with final code like this:
+///
+/// BB0:
+///   %vreg0 <sgpr> = SCALAR_INST
+///    ...
+///    BRANCH %cond BB1, BB2
+/// BB1:
+///   %vreg2 <vgpr> = VECTOR_INST
+///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// BB2:
+///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
+///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now this code contains an illegal copy from a VGPR to an SGPR.
+///
+/// To avoid this problem, this pass searches for PHI instructions that define
+/// a <vsrc> register and constrains the definition's register class to <vgpr>
+/// if a user of the PHI's result is a vector instruction.  If the PHI's
+/// definition class is constrained to <vgpr>, the coalescer will be unable to
+/// perform the COPY removal from the example above, which ultimately led to
+/// the creation of an illegal COPY.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIFixSGPRCopies : public MachineFunctionPass {
+
+private:
+  static char ID;
+  const TargetRegisterClass *inferRegClass(const TargetRegisterInfo *TRI,
+                                           const MachineRegisterInfo &MRI,
+                                           unsigned Reg) const;
+
+public:
+  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+  virtual bool runOnMachineFunction(MachineFunction &MF);
+
+  const char *getPassName() const {
+    return "SI Fix SGPR copies";
+  }
+
+};
+
+} // End anonymous namespace
+
+char SIFixSGPRCopies::ID = 0;
+
+FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
+  return new SIFixSGPRCopies(tm);
+}
+
+/// This function walks the use/def chains starting with the definition of
+/// \p Reg until it finds an instruction that isn't a COPY, and returns
+/// the register class of that instruction.
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClass(
+                                                 const TargetRegisterInfo *TRI,
+                                                 const MachineRegisterInfo &MRI,
+                                                 unsigned Reg) const {
+  // The Reg parameter to the function must always be defined by either a PHI
+  // or a COPY, therefore it cannot be a physical register.
+  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
+         "Reg cannot be a physical register");
+
+  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
+                                         E = MRI.use_end(); I != E; ++I) {
+    switch (I->getOpcode()) {
+    case AMDGPU::COPY:
+      RC = TRI->getCommonSubClass(RC, inferRegClass(TRI, MRI,
+                                                    I->getOperand(0).getReg()));
+      break;
+    }
+  }
+
+  return RC;
+}
+
+bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+                                                  BI != BE; ++BI) {
+
+    MachineBasicBlock &MBB = *BI;
+    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+                                                      I != E; ++I) {
+      MachineInstr &MI = *I;
+      if (MI.getOpcode() != AMDGPU::PHI) {
+        continue;
+      }
+      unsigned Reg = MI.getOperand(0).getReg();
+      const TargetRegisterClass *RC = inferRegClass(TRI, MRI, Reg);
+      if (RC == &AMDGPU::VSrc_32RegClass) {
+        MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+      }
+    }
+  }
+  return false;
+}
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index a53e0b9..c64027f 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -32,7 +32,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
     AMDGPUTargetLowering(TM) {
 
   addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
-  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
+  addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);
 
   addRegisterClass(MVT::v2i1, &AMDGPU::VReg_64RegClass);
   addRegisterClass(MVT::v4i1, &AMDGPU::VReg_128RegClass);
@@ -41,14 +41,14 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
   addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
   addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
 
-  addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
-  addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+  addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
+  addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);
 
-  addRegisterClass(MVT::v1i32, &AMDGPU::VReg_32RegClass);
+  addRegisterClass(MVT::v1i32, &AMDGPU::VSrc_32RegClass);
 
-  addRegisterClass(MVT::v2i32, &AMDGPU::VReg_64RegClass);
-  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
-  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
+  addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
+  addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
+  addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);
 
   addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
   addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
@@ -1042,20 +1042,6 @@ MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
 
   switch (N->getMachineOpcode()) {
   default: return N;
-  case AMDGPU::REG_SEQUENCE: {
-    // MVT::i128 only use SGPRs, so i128 REG_SEQUENCEs don't need to be
-    // rewritten.
-    if (N->getValueType(0) == MVT::i128) {
-      return N;
-    }
-    const SDValue Ops[] = {
-      DAG.getTargetConstant(AMDGPU::VReg_64RegClassID, MVT::i32),
-      N->getOperand(1) , N->getOperand(2),
-      N->getOperand(3), N->getOperand(4)
-    };
-    return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64, Ops);
-  }
-
   case AMDGPU::S_LOAD_DWORD_IMM:
     NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
     // Fall-through
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
new file mode 100644
index 0000000..b0d4549
--- /dev/null
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -0,0 +1,84 @@
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+
+; This test checks that no VGPR to SGPR copies are created by the register
+; allocator.
+; CHECK: @main
+; CHECK: S_BUFFER_LOAD_DWORD [[DST:SGPR[0-9]]], {{[SGPR_[0-9]+}}, 0
+; CHECK: V_MOV_B32_e32 VGPR{{[0-9]}}, [[DST]]
+
+define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+  %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !0
+  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
+  %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
+  %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 32)
+  %25 = fptosi float %23 to i32
+  %26 = icmp ne i32 %25, 0
+  br i1 %26, label %ENDIF, label %ELSE
+
+ELSE:                                             ; preds = %main_body
+  %27 = fsub float -0.000000e+00, %22
+  br label %ENDIF
+
+ENDIF:                                            ; preds = %main_body, %ELSE
+  %temp.0 = phi float [ %27, %ELSE ], [ %22, %main_body ]
+  %28 = fadd float %temp.0, %24
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %28, float %28, float 0.000000e+00, float 1.000000e+00)
+  ret void
+}
+
+; We just want to make sure the program doesn't crash.
+; CHECK: @loop
+
+define void @loop(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+  %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+  %21 = load <16 x i8> addrspace(2)* %20, !tbaa !0
+  %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
+  %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 4)
+  %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 8)
+  %25 = call float @llvm.SI.load.const(<16 x i8> %21, i32 12)
+  %26 = fptosi float %25 to i32
+  %27 = bitcast i32 %26 to float
+  %28 = bitcast float %27 to i32
+  br label %LOOP
+
+LOOP:                                             ; preds = %ENDIF, %main_body
+  %temp4.0 = phi float [ %22, %main_body ], [ %temp5.0, %ENDIF ]
+  %temp5.0 = phi float [ %23, %main_body ], [ %temp6.0, %ENDIF ]
+  %temp6.0 = phi float [ %24, %main_body ], [ %temp4.0, %ENDIF ]
+  %temp8.0 = phi float [ 0.000000e+00, %main_body ], [ %37, %ENDIF ]
+  %29 = bitcast float %temp8.0 to i32
+  %30 = icmp sge i32 %29, %28
+  %31 = sext i1 %30 to i32
+  %32 = bitcast i32 %31 to float
+  %33 = bitcast float %32 to i32
+  %34 = icmp ne i32 %33, 0
+  br i1 %34, label %IF, label %ENDIF
+
+IF:                                               ; preds = %LOOP
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %temp4.0, float %temp5.0, float %temp6.0, float 1.000000e+00)
+  ret void
+
+ENDIF:                                            ; preds = %LOOP
+  %35 = bitcast float %temp8.0 to i32
+  %36 = add i32 %35, 1
+  %37 = bitcast i32 %36 to float
+  br label %LOOP
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+; Function Attrs: readonly
+declare float @fabs(float) #2
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { readonly }
+
+!0 = metadata !{metadata !"const", null, i32 1}
+
-- 
1.7.11.4


