[PATCH] R600: Custom lower frem

Matt Arsenault Matthew.Arsenault at amd.com
Fri Sep 12 10:22:39 PDT 2014


On 09/12/2014 01:19 PM, Jan Vesely wrote:
> Hi,
>
> do you think it makes sense to implement this in llvm instead of r600?
> see attached patch.
>
> thanks,
> jan
Yes, but it already had the libcall expansion. I think that 
expand-to-libcall should be a legalize action distinct from plain expand 
for situations like this, but that would be a bigger change.

>
> On Wed, 2014-09-10 at 17:15 -0400, Tom Stellard wrote:
>> On Wed, Sep 10, 2014 at 08:55:52PM +0000, Matt Arsenault wrote:
>>> http://reviews.llvm.org/D5300
>> LGTM.
>>
>>> Files:
>>>    lib/Target/R600/AMDGPUISelLowering.cpp
>>>    lib/Target/R600/AMDGPUISelLowering.h
>>>    test/CodeGen/R600/frem.ll
>>> Index: lib/Target/R600/AMDGPUISelLowering.cpp
>>> ===================================================================
>>> --- lib/Target/R600/AMDGPUISelLowering.cpp
>>> +++ lib/Target/R600/AMDGPUISelLowering.cpp
>>> @@ -130,6 +130,9 @@
>>>     setOperationAction(ISD::FROUND, MVT::f32, Legal);
>>>     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
>>>   
>>> +  setOperationAction(ISD::FREM, MVT::f32, Custom);
>>> +  setOperationAction(ISD::FREM, MVT::f64, Custom);
>>> +
>>>     // Lower floating point store/load to integer store/load to reduce the number
>>>     // of patterns in tablegen.
>>>     setOperationAction(ISD::STORE, MVT::f32, Promote);
>>> @@ -347,6 +350,7 @@
>>>       setOperationAction(ISD::FDIV, VT, Expand);
>>>       setOperationAction(ISD::FEXP2, VT, Expand);
>>>       setOperationAction(ISD::FLOG2, VT, Expand);
>>> +    setOperationAction(ISD::FREM, VT, Expand);
>>>       setOperationAction(ISD::FPOW, VT, Expand);
>>>       setOperationAction(ISD::FFLOOR, VT, Expand);
>>>       setOperationAction(ISD::FTRUNC, VT, Expand);
>>> @@ -548,6 +552,7 @@
>>>     case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
>>>     case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
>>>     case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
>>> +  case ISD::FREM: return LowerFREM(Op, DAG);
>>>     case ISD::FCEIL: return LowerFCEIL(Op, DAG);
>>>     case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
>>>     case ISD::FRINT: return LowerFRINT(Op, DAG);
>>> @@ -1650,6 +1655,20 @@
>>>     return DAG.getMergeValues(Res, DL);
>>>   }
>>>   
>>> +// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
>>> +SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
>>> +  SDLoc SL(Op);
>>> +  EVT VT = Op.getValueType();
>>> +  SDValue X = Op.getOperand(0);
>>> +  SDValue Y = Op.getOperand(1);
>>> +
>>> +  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
>>> +  SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
>>> +  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
>>> +
>>> +  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
>>> +}
>>> +
>>>   SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
>>>     SDLoc SL(Op);
>>>     SDValue Src = Op.getOperand(0);
>>> Index: lib/Target/R600/AMDGPUISelLowering.h
>>> ===================================================================
>>> --- lib/Target/R600/AMDGPUISelLowering.h
>>> +++ lib/Target/R600/AMDGPUISelLowering.h
>>> @@ -44,6 +44,7 @@
>>>     /// \returns The resulting chain.
>>>   
>>>     SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
>>> +  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
>>>     SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
>>>     SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
>>>     SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
>>> Index: test/CodeGen/R600/frem.ll
>>> ===================================================================
>>> --- /dev/null
>>> +++ test/CodeGen/R600/frem.ll
>>> @@ -0,0 +1,103 @@
>>> +; RUN: llc -march=r600 -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
>>> +
>>> +; FUNC-LABEL: @frem_f32:
>>> +; SI-DAG: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*$}}
>>> +; SI-DAG: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
>>> +; SI-DAG: V_CMP
>>> +; SI-DAG: V_MUL_F32
>>> +; SI: V_RCP_F32_e32
>>> +; SI: V_MUL_F32_e32
>>> +; SI: V_MUL_F32_e32
>>> +; SI: V_TRUNC_F32_e32
>>> +; SI: V_MAD_F32
>>> +; SI: S_ENDPGM
>>> +define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
>>> +                      float addrspace(1)* %in2) #0 {
>>> +   %gep2 = getelementptr float addrspace(1)* %in2, i32 4
>>> +   %r0 = load float addrspace(1)* %in1, align 4
>>> +   %r1 = load float addrspace(1)* %gep2, align 4
>>> +   %r2 = frem float %r0, %r1
>>> +   store float %r2, float addrspace(1)* %out, align 4
>>> +   ret void
>>> +}
>>> +
>>> +; FUNC-LABEL: @unsafe_frem_f32:
>>> +; SI: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
>>> +; SI: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*}}
>>> +; SI: V_RCP_F32_e32 [[INVY:v[0-9]+]], [[Y]]
>>> +; SI: V_MUL_F32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
>>> +; SI: V_TRUNC_F32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
>>> +; SI: V_MAD_F32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]],
>>> +; SI: BUFFER_STORE_DWORD [[RESULT]]
>>> +; SI: S_ENDPGM
>>> +define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
>>> +                             float addrspace(1)* %in2) #1 {
>>> +   %gep2 = getelementptr float addrspace(1)* %in2, i32 4
>>> +   %r0 = load float addrspace(1)* %in1, align 4
>>> +   %r1 = load float addrspace(1)* %gep2, align 4
>>> +   %r2 = frem float %r0, %r1
>>> +   store float %r2, float addrspace(1)* %out, align 4
>>> +   ret void
>>> +}
>>> +
>>> +; TODO: This should check something when f64 fdiv is implemented
>>> +; correctly
>>> +
>>> +; FUNC-LABEL: @frem_f64:
>>> +; SI: S_ENDPGM
>>> +define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>>> +                      double addrspace(1)* %in2) #0 {
>>> +   %r0 = load double addrspace(1)* %in1, align 8
>>> +   %r1 = load double addrspace(1)* %in2, align 8
>>> +   %r2 = frem double %r0, %r1
>>> +   store double %r2, double addrspace(1)* %out, align 8
>>> +   ret void
>>> +}
>>> +
>>> +; FUNC-LABEL: @unsafe_frem_f64:
>>> +; SI: V_RCP_F64_e32
>>> +; SI: V_MUL_F64
>>> +; SI: V_BFE_I32
>>> +; SI: V_FMA_F64
>>> +; SI: S_ENDPGM
>>> +define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>>> +                             double addrspace(1)* %in2) #1 {
>>> +   %r0 = load double addrspace(1)* %in1, align 8
>>> +   %r1 = load double addrspace(1)* %in2, align 8
>>> +   %r2 = frem double %r0, %r1
>>> +   store double %r2, double addrspace(1)* %out, align 8
>>> +   ret void
>>> +}
>>> +
>>> +define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
>>> +                        <2 x float> addrspace(1)* %in2) #0 {
>>> +   %gep2 = getelementptr <2 x float> addrspace(1)* %in2, i32 4
>>> +   %r0 = load <2 x float> addrspace(1)* %in1, align 8
>>> +   %r1 = load <2 x float> addrspace(1)* %gep2, align 8
>>> +   %r2 = frem <2 x float> %r0, %r1
>>> +   store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
>>> +   ret void
>>> +}
>>> +
>>> +define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
>>> +                        <4 x float> addrspace(1)* %in2) #0 {
>>> +   %gep2 = getelementptr <4 x float> addrspace(1)* %in2, i32 4
>>> +   %r0 = load <4 x float> addrspace(1)* %in1, align 16
>>> +   %r1 = load <4 x float> addrspace(1)* %gep2, align 16
>>> +   %r2 = frem <4 x float> %r0, %r1
>>> +   store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
>>> +   ret void
>>> +}
>>> +
>>> +define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
>>> +                        <2 x double> addrspace(1)* %in2) #0 {
>>> +   %gep2 = getelementptr <2 x double> addrspace(1)* %in2, i32 4
>>> +   %r0 = load <2 x double> addrspace(1)* %in1, align 16
>>> +   %r1 = load <2 x double> addrspace(1)* %gep2, align 16
>>> +   %r2 = frem <2 x double> %r0, %r1
>>> +   store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
>>> +   ret void
>>> +}
>>> +
>>> +attributes #0 = { nounwind "unsafe-fp-math"="false" }
>>> +attributes #1 = { nounwind "unsafe-fp-math"="true" }
>>> _______________________________________________
>>> llvm-commits mailing list
>>> llvm-commits at cs.uiuc.edu
>>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits at cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits




More information about the llvm-commits mailing list