[PATCH] R600/SI: Make private pointers be 32-bit.
Matt Arsenault
Matthew.Arsenault at amd.com
Mon Dec 16 13:47:21 PST 2013
On 12/16/2013 01:45 PM, Tom Stellard wrote:
> On Mon, Dec 16, 2013 at 12:53:13PM -0800, Matt Arsenault wrote:
>> Different-sized address spaces should theoretically work most of the time now,
>> and since 64-bit add is currently disabled, using more 32-bit pointers fixes some cases.
>>
>> http://llvm-reviews.chandlerc.com/D2418
>>
>> Files:
>> lib/Target/R600/AMDGPUTargetMachine.cpp
>> lib/Target/R600/SIISelLowering.cpp
>> lib/Target/R600/SIInstrInfo.td
>> lib/Target/R600/SIInstructions.td
>> test/CodeGen/R600/array-ptr-calc-i32.ll
>>
>> Index: lib/Target/R600/AMDGPUTargetMachine.cpp
>> ===================================================================
>> --- lib/Target/R600/AMDGPUTargetMachine.cpp
>> +++ lib/Target/R600/AMDGPUTargetMachine.cpp
>> @@ -52,12 +52,12 @@
>> static std::string computeDataLayout(const AMDGPUSubtarget &ST) {
>> std::string Ret = "e";
>>
>> - if (!ST.is64bit())
>> + if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
>> + // 32-bit private, local, and region pointers. 64-bit global and constant.
>> + Ret += "-p:32:32-p1:64:64:64-p2:64:64:64-p3:32:32-p4:32:32:32-p5:64:64:64";
>> + } else if (!ST.is64bit())
>> Ret += "-p:32:32";
> You can factor the -p:32:32 out of the branch, otherwise LGTM. Do you
> have a patch to update the DataLayout in clang too?
>
> -Tom
No, I forgot about clang. I'll do that next. It would probably be a good
idea to send this through your tests; I haven't managed to get Mesa to
build yet.
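
Something like this would factor it out (an untested sketch; I'm assuming
the rest of computeDataLayout, including the final "return Ret;", stays as
it is in the tree, and I'm reusing the exact strings from the patch):

static std::string computeDataLayout(const AMDGPUSubtarget &ST) {
  std::string Ret = "e";

  // Every subtarget that overrides the default gets 32-bit (private)
  // pointers, so append "-p:32:32" once instead of in both branches.
  if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS || !ST.is64bit())
    Ret += "-p:32:32";

  // SI and newer additionally use 64-bit global and constant pointers and
  // 32-bit local and region pointers.
  if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
    Ret += "-p1:64:64:64-p2:64:64:64-p3:32:32-p4:32:32:32-p5:64:64:64";

  Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
         "-v512:512-v1024:1024-v2048:2048-n32:64";

  return Ret;
}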
>
>>
>> - if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
>> - Ret += "-p3:32:32";
>> -
>> Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
>> "-v512:512-v1024:1024-v2048:2048-n32:64";
>>
>> Index: lib/Target/R600/SIISelLowering.cpp
>> ===================================================================
>> --- lib/Target/R600/SIISelLowering.cpp
>> +++ lib/Target/R600/SIISelLowering.cpp
>> @@ -137,7 +137,7 @@
>> setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
>>
>> setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
>> - setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
>> + setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
>>
>> setTargetDAGCombine(ISD::SELECT_CC);
>>
>> @@ -704,9 +704,7 @@
>> if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
>> return SDValue();
>>
>> - SDValue TruncPtr = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
>> - Load->getBasePtr(), DAG.getConstant(0, MVT::i32));
>> - SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
>> + SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
>> DAG.getConstant(2, MVT::i32));
>>
>> SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
>> @@ -793,8 +791,7 @@
>> if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
>> return SDValue();
>>
>> - SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
>> - SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
>> + SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Store->getBasePtr(),
>> DAG.getConstant(2, MVT::i32));
>> SDValue Chain = Store->getChain();
>> SmallVector<SDValue, 8> Values;
>> Index: lib/Target/R600/SIInstrInfo.td
>> ===================================================================
>> --- lib/Target/R600/SIInstrInfo.td
>> +++ lib/Target/R600/SIInstrInfo.td
>> @@ -121,7 +121,7 @@
>> return false;
>> }]>;
>>
>> -def FRAMEri64 : Operand<iPTR> {
>> +def FRAMEri32 : Operand<iPTR> {
>> let MIOperandInfo = (ops SReg_32:$ptr, i32imm:$index);
>> }
>>
>> Index: lib/Target/R600/SIInstructions.td
>> ===================================================================
>> --- lib/Target/R600/SIInstructions.td
>> +++ lib/Target/R600/SIInstructions.td
>> @@ -1328,13 +1328,13 @@
>>
>> let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
>>
>> -//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri64, ADDRIndirect>;
>> +//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri, ADDRIndirect>;
>>
>> let UseNamedOperandTable = 1 in {
>>
>> def SI_RegisterLoad : AMDGPUShaderInst <
>> - (outs VReg_32:$dst, SReg_64:$temp),
>> - (ins FRAMEri64:$addr, i32imm:$chan),
>> + (outs VReg_32:$dst, SReg_32:$temp),
>> + (ins FRAMEri32:$addr, i32imm:$chan),
>> "", []
>> > {
>> let isRegisterLoad = 1;
>> @@ -1343,7 +1343,7 @@
>>
>> class SIRegStore<dag outs> : AMDGPUShaderInst <
>> outs,
>> - (ins VReg_32:$val, FRAMEri64:$addr, i32imm:$chan),
>> + (ins VReg_32:$val, FRAMEri32:$addr, i32imm:$chan),
>> "", []
>> > {
>> let isRegisterStore = 1;
>> @@ -1353,20 +1353,20 @@
>> let usesCustomInserter = 1 in {
>> def SI_RegisterStorePseudo : SIRegStore<(outs)>;
>> } // End usesCustomInserter = 1
>> -def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
>> +def SI_RegisterStore : SIRegStore<(outs SReg_32:$temp)>;
>>
>>
>> } // End UseNamedOperandTable = 1
>>
>> def SI_INDIRECT_SRC : InstSI <
>> - (outs VReg_32:$dst, SReg_64:$temp),
>> + (outs VReg_32:$dst, SReg_32:$temp),
>> (ins unknown:$src, VSrc_32:$idx, i32imm:$off),
>> "SI_INDIRECT_SRC $dst, $temp, $src, $idx, $off",
>> []
>> >;
>>
>> class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
>> - (outs rc:$dst, SReg_64:$temp),
>> + (outs rc:$dst, SReg_32:$temp),
>> (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VReg_32:$val),
>> "SI_INDIRECT_DST $dst, $temp, $src, $idx, $off, $val",
>> []
>> Index: test/CodeGen/R600/array-ptr-calc-i32.ll
>> ===================================================================
>> --- /dev/null
>> +++ test/CodeGen/R600/array-ptr-calc-i32.ll
>> @@ -0,0 +1,31 @@
>> +; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
>> +
>> +declare i32 @llvm.SI.tid() nounwind readnone
>> +declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
>> +
>> +; The pointer calculation required for the alloca'd array actually needs
>> +; an add and won't be folded into the addressing, which fails with a
>> +; 64-bit pointer add. This should work since private pointers should
>> +; be 32 bits.
>> +
>> +; SI-LABEL: @test_private_array_ptr_calc:
>> +; SI: V_ADD_I32_e32 [[PTRREG:v[0-9]+]]
>> +; SI: V_MOVRELD_B32_e32 {{v[0-9]+}}, [[PTRREG]]
>> +define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
>> + %alloca = alloca [4 x i32], i32 4, align 16
>> + %tid = call i32 @llvm.SI.tid() readnone
>> + %a_ptr = getelementptr i32 addrspace(1)* %inA, i32 %tid
>> + %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
>> + %a = load i32 addrspace(1)* %a_ptr
>> + %b = load i32 addrspace(1)* %b_ptr
>> + %result = add i32 %a, %b
>> + %alloca_ptr = getelementptr [4 x i32]* %alloca, i32 1, i32 %b
>> + store i32 %result, i32* %alloca_ptr, align 4
>> + ; Dummy call
>> + call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
>> + %reload = load i32* %alloca_ptr, align 4
>> + %out_ptr = getelementptr i32 addrspace(1)* %out, i32 %tid
>> + store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
>> + ret void
>> +}
>> +
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits at cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>