[PATCH] R600/SI: Make private pointers be 32-bit.
Tom Stellard
tom at stellard.net
Wed Dec 18 12:44:24 PST 2013
On Tue, Dec 17, 2013 at 10:11:48AM -0800, Matt Arsenault wrote:
> Fix check for 64-bit. I don't think there's any particular reason to check for SI here.
>
> http://llvm-reviews.chandlerc.com/D2418
>
> CHANGE SINCE LAST DIFF
> http://llvm-reviews.chandlerc.com/D2418?vs=6128&id=6147#toc
>
I've found a few issues while testing this.
> Files:
> lib/Target/R600/AMDGPUTargetMachine.cpp
> lib/Target/R600/SIISelLowering.cpp
> lib/Target/R600/SIInstrInfo.td
> lib/Target/R600/SIInstructions.td
> test/CodeGen/R600/array-ptr-calc-i32.ll
>
> Index: lib/Target/R600/AMDGPUTargetMachine.cpp
> ===================================================================
> --- lib/Target/R600/AMDGPUTargetMachine.cpp
> +++ lib/Target/R600/AMDGPUTargetMachine.cpp
> @@ -50,13 +50,12 @@
> createR600MachineScheduler);
>
> static std::string computeDataLayout(const AMDGPUSubtarget &ST) {
> - std::string Ret = "e";
> + std::string Ret = "e-p32:32";
>
I had to replace p32:32 with p0:32:32 or else I would get an assertion
failure in the DataLayout parser. This is probably due to some of the
recent DataLayout changes.
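For reference, the construction I ended up testing looks roughly like the sketch below (plain C++ mirroring the patch, with the p0 spelling; Is64Bit stands in for ST.is64bit() -- treat this as illustrative, not the final diff):

  #include <string>

  // Sketch only: build the data layout string with 32-bit private
  // (address space 0) pointers, using p0:32:32 instead of p32:32.
  static std::string computeDataLayoutSketch(bool Is64Bit) {
    std::string Ret = "e-p0:32:32";

    if (Is64Bit) {
      // 32-bit private, local, and region pointers. 64-bit global and constant.
      Ret += "-p1:64:64:64-p2:64:64:64-p3:32:32-p4:32:32:32-p5:64:64:64";
    }

    Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
           "-v512:512-v1024:1024-v2048:2048-n32:64";
    return Ret;
  }
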
> - if (!ST.is64bit())
> - Ret += "-p:32:32";
> -
> - if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
> - Ret += "-p3:32:32";
> + if (ST.is64bit()) {
> + // 32-bit private, local, and region pointers. 64-bit global and constant.
> + Ret += "-p1:64:64:64-p2:64:64:64-p3:32:32-p4:32:32:32-p5:64:64:64";
> + }
>
> Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
> "-v512:512-v1024:1024-v2048:2048-n32:64";
> Index: lib/Target/R600/SIISelLowering.cpp
> ===================================================================
> --- lib/Target/R600/SIISelLowering.cpp
> +++ lib/Target/R600/SIISelLowering.cpp
> @@ -137,7 +137,7 @@
> setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
>
> setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
> - setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
> + setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
>
> setTargetDAGCombine(ISD::SELECT_CC);
>
> @@ -704,9 +704,7 @@
> if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
> return SDValue();
>
> - SDValue TruncPtr = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
> - Load->getBasePtr(), DAG.getConstant(0, MVT::i32));
> - SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
> + SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
> DAG.getConstant(2, MVT::i32));
>
> SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
> @@ -793,8 +791,7 @@
> if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
> return SDValue();
>
> - SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
> - SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
> + SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Store->getBasePtr(),
> DAG.getConstant(2, MVT::i32));
> SDValue Chain = Store->getChain();
> SmallVector<SDValue, 8> Values;
> Index: lib/Target/R600/SIInstrInfo.td
> ===================================================================
> --- lib/Target/R600/SIInstrInfo.td
> +++ lib/Target/R600/SIInstrInfo.td
> @@ -121,7 +121,7 @@
> return false;
> }]>;
>
> -def FRAMEri64 : Operand<iPTR> {
> +def FRAMEri32 : Operand<iPTR> {
> let MIOperandInfo = (ops SReg_32:$ptr, i32imm:$index);
> }
>
> Index: lib/Target/R600/SIInstructions.td
> ===================================================================
> --- lib/Target/R600/SIInstructions.td
> +++ lib/Target/R600/SIInstructions.td
> @@ -1328,13 +1328,13 @@
>
> let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
>
> -//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri64, ADDRIndirect>;
> +//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri, ADDRIndirect>;
>
> let UseNamedOperandTable = 1 in {
>
> def SI_RegisterLoad : AMDGPUShaderInst <
> - (outs VReg_32:$dst, SReg_64:$temp),
> - (ins FRAMEri64:$addr, i32imm:$chan),
> + (outs VReg_32:$dst, SReg_32:$temp),
$temp is used for the EXEC mask, so it needs to be a 64-bit register (see the sketch after the SIInstructions.td hunks below).
> + (ins FRAMEri32:$addr, i32imm:$chan),
> "", []
> > {
> let isRegisterLoad = 1;
> @@ -1343,7 +1343,7 @@
>
> class SIRegStore<dag outs> : AMDGPUShaderInst <
> outs,
> - (ins VReg_32:$val, FRAMEri64:$addr, i32imm:$chan),
> + (ins VReg_32:$val, FRAMEri32:$addr, i32imm:$chan),
> "", []
> > {
> let isRegisterStore = 1;
> @@ -1353,20 +1353,20 @@
> let usesCustomInserter = 1 in {
> def SI_RegisterStorePseudo : SIRegStore<(outs)>;
> } // End usesCustomInserter = 1
> -def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
> +def SI_RegisterStore : SIRegStore<(outs SReg_32:$temp)>;
>
Same here; $temp needs to be 64-bit.
>
> } // End UseNamedOperandTable = 1
>
> def SI_INDIRECT_SRC : InstSI <
> - (outs VReg_32:$dst, SReg_64:$temp),
> + (outs VReg_32:$dst, SReg_32:$temp),
> (ins unknown:$src, VSrc_32:$idx, i32imm:$off),
> "SI_INDIRECT_SRC $dst, $temp, $src, $idx, $off",
> []
> >;
>
Here too.
> class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
> - (outs rc:$dst, SReg_64:$temp),
> + (outs rc:$dst, SReg_32:$temp),
> (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VReg_32:$val),
> "SI_INDIRECT_DST $dst, $temp, $src, $idx, $off, $val",
> []
And here.
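
To make the $temp comments above concrete: when the index isn't uniform, the lowering of these pseudos saves and restores the full 64-lane EXEC mask through $temp, which is why it has to stay in SReg_64. A rough fragment of what that expansion does (illustrative only, not the exact in-tree code; MBB, I, DL, TII, and MRI are the usual machine-IR context at the insertion point):

  // Save the 64-bit EXEC mask into the pseudo's $temp register before the
  // indirect-addressing loop modifies EXEC, then restore it afterwards.
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
      .addReg(AMDGPU::EXEC);

  // ... per-index loop that narrows EXEC and issues V_MOVRELS/V_MOVRELD ...

  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(SaveExec);

A 32-bit SGPR can't hold the whole mask, so SReg_32:$temp breaks that expansion.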
> Index: test/CodeGen/R600/array-ptr-calc-i32.ll
> ===================================================================
> --- /dev/null
> +++ test/CodeGen/R600/array-ptr-calc-i32.ll
> @@ -0,0 +1,31 @@
> +; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
> +
> +declare i32 @llvm.SI.tid() nounwind readnone
> +declare void @llvm.AMDGPU.barrier.local() nounwind noduplicate
> +
> +; The pointer calculation for the alloca'd array actually requires
> +; an add and won't be folded into the addressing, which fails with a
> +; 64-bit pointer add. This should work since private pointers should
> +; be 32 bits.
> +
> +; SI-LABEL: @test_private_array_ptr_calc:
> +; SI: V_ADD_I32_e32 [[PTRREG:v[0-9]+]]
> +; SI: V_MOVRELD_B32_e32 {{v[0-9]+}}, [[PTRREG]]
> +define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
> + %alloca = alloca [4 x i32], i32 4, align 16
> + %tid = call i32 @llvm.SI.tid() readnone
> + %a_ptr = getelementptr i32 addrspace(1)* %inA, i32 %tid
> + %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
> + %a = load i32 addrspace(1)* %a_ptr
> + %b = load i32 addrspace(1)* %b_ptr
> + %result = add i32 %a, %b
> + %alloca_ptr = getelementptr [4 x i32]* %alloca, i32 1, i32 %b
> + store i32 %result, i32* %alloca_ptr, align 4
> + ; Dummy call
> + call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
> + %reload = load i32* %alloca_ptr, align 4
> + %out_ptr = getelementptr i32 addrspace(1)* %out, i32 %tid
> + store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
> + ret void
> +}
> +