[PATCH] R600/SI: Use v_cvt_f32_ubyte* instructions

Tom Stellard tom at stellard.net
Tue Jun 10 12:52:35 PDT 2014


On Mon, May 26, 2014 at 10:56:42PM +0000, Matt Arsenault wrote:
> This eliminates the extra extract instructions emitted when loading an i8 vector and converting it to a float vector.
> 
> http://reviews.llvm.org/D3915
> 
> Files:
>   lib/Target/R600/AMDGPUISelLowering.cpp
>   lib/Target/R600/AMDGPUISelLowering.h
>   lib/Target/R600/AMDGPUInstrInfo.td
>   lib/Target/R600/SIISelLowering.cpp
>   lib/Target/R600/SIISelLowering.h
>   lib/Target/R600/SIInstructions.td
>   test/CodeGen/R600/bitcast.ll
>   test/CodeGen/R600/cvt_f32_ubyte.ll
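
For readers unfamiliar with these instructions: v_cvt_f32_ubyte[0-3] converts
the corresponding byte of a 32-bit source to a float, so a v4i8 -> v4f32
conversion becomes a single dword load plus four converts instead of per-byte
extract sequences (BFE/LSHR/AND) feeding v_cvt_f32_u32, as the new tests below
check. In scalar C++ terms the semantics are roughly the following (a sketch
only, not hardware documentation; the conversion is exact since every value in
0..255 is representable as f32):

  #include <cstdint>

  // Sketch of v_cvt_f32_ubyte<N>: convert byte N of Src to float.
  float cvt_f32_ubyte(uint32_t Src, unsigned N) {
    return static_cast<float>((Src >> (8 * N)) & 0xFFu);
  }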

> Index: lib/Target/R600/AMDGPUISelLowering.cpp
> ===================================================================
> --- lib/Target/R600/AMDGPUISelLowering.cpp
> +++ lib/Target/R600/AMDGPUISelLowering.cpp
> @@ -85,18 +85,18 @@
>  #include "AMDGPUGenCallingConv.inc"
>  
>  // Find a larger type to do a load / store of a vector with.
> -static MVT getEquivalentMemType(MVT VT) {
> +EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
>    unsigned StoreSize = VT.getStoreSizeInBits();
>    if (StoreSize <= 32)
> -    return MVT::getIntegerVT(StoreSize);
> +    return EVT::getIntegerVT(Ctx, StoreSize);
>  
>    assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
>  
> -  return MVT::getVectorVT(MVT::i32, StoreSize / 32);
> +  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
>  }
>  
>  // Type for a vector that will be loaded to.
> -static MVT getEquivalentLoadRegType(MVT VT) {
> +EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
>    unsigned StoreSize = VT.getStoreSizeInBits();
>    if (StoreSize <= 32)
>      return MVT::getIntegerVT(32);
> @@ -1061,7 +1061,7 @@
>      if (MemEltBits != 64)
>        return SDValue();
>  
> -    MVT StoreVT = getEquivalentMemType(VT.getSimpleVT());
> +    EVT StoreVT = getEquivalentMemType(*DAG.getContext(), VT);
>      SDValue Cast = DAG.getNode(ISD::BITCAST, DL, StoreVT, Value);
>  
>      return DAG.getStore(Store->getChain(), DL, Cast,
> @@ -1077,7 +1077,7 @@
>        EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
>  
>        // Doing a byte or short store.
> -      MVT StoreVT = getEquivalentMemType(PackedVT.getSimpleVT());
> +      EVT StoreVT = getEquivalentMemType(*DAG.getContext(), PackedVT);
>        Packed = DAG.getNode(ISD::TRUNCATE, DL, StoreVT, Packed);
>      }
>  
> @@ -1256,8 +1256,8 @@
>      assert(!VT.isFloatingPoint() && "FP ext loads not yet handled");
>  
>      // Figure out what type to load as.
> -    MVT LoadVT = getEquivalentMemType(MemVT);
> -    MVT DestLoadVT = getEquivalentLoadRegType(MemVT);
> +    EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
> +    EVT DestLoadVT = getEquivalentLoadRegType(*DAG.getContext(), MemVT);
>  
>      // If we're loading a v2i8 as an i16, we need to do an extload of the i16 to
>      // i32.
> @@ -1323,7 +1323,7 @@
>  
>    if (ExtType == ISD::NON_EXTLOAD && VT.isVector() &&
>        MemVT.getScalarSizeInBits() == 64) {
> -    MVT LoadVT = getEquivalentMemType(MemVT);
> +    EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
>      assert(LoadVT != MemVT); // Avoid infinite loop.
>  
>      SDValue NewLoad = DAG.getLoad(Load->getAddressingMode(),
> @@ -1656,7 +1656,7 @@
>    unsigned BitWidth = PackedVT.getScalarSizeInBits();
>    unsigned NElts = Elts.size();
>  
> -  MVT StoreVT = getEquivalentMemType(PackedVT.getSimpleVT());
> +  EVT StoreVT = getEquivalentMemType(*DAG.getContext(), PackedVT);
>  
>    assert(NElts > 0);
>  
> @@ -2019,6 +2019,10 @@
>    NODE_NAME_CASE(SAMPLEB)
>    NODE_NAME_CASE(SAMPLED)
>    NODE_NAME_CASE(SAMPLEL)
> +  NODE_NAME_CASE(CVT_F32_UBYTE0)
> +  NODE_NAME_CASE(CVT_F32_UBYTE1)
> +  NODE_NAME_CASE(CVT_F32_UBYTE2)
> +  NODE_NAME_CASE(CVT_F32_UBYTE3)
>    NODE_NAME_CASE(STORE_MSKOR)
>    NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
>    }
> Index: lib/Target/R600/AMDGPUISelLowering.h
> ===================================================================
> --- lib/Target/R600/AMDGPUISelLowering.h
> +++ lib/Target/R600/AMDGPUISelLowering.h
> @@ -46,6 +46,8 @@
>    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
>  
>  protected:
> +  static EVT getEquivalentMemType(LLVMContext &Ctx, EVT VT);
> +  static EVT getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT);
>  
>    /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
>    /// MachineFunction.
> @@ -218,6 +220,10 @@
>    SAMPLEB,
>    SAMPLED,
>    SAMPLEL,

Can you add a comment here saying that the order of the CVT_* values
should not be changed, since some of the lowering code assumes this
order? For instance, something along these lines (the wording is only a
suggestion):
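
  SAMPLEL,
  // These correspond to the v_cvt_f32_ubyte[0-3] instructions and must stay
  // consecutive and in this order: some of the lowering code computes the
  // opcode for byte N as CVT_F32_UBYTE0 + N.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

Otherwise, LGTM.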

-Tom

> +  CVT_F32_UBYTE0,
> +  CVT_F32_UBYTE1,
> +  CVT_F32_UBYTE2,
> +  CVT_F32_UBYTE3,
>    FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
>    STORE_MSKOR,
>    LOAD_CONSTANT,
> Index: lib/Target/R600/AMDGPUInstrInfo.td
> ===================================================================
> --- lib/Target/R600/AMDGPUInstrInfo.td
> +++ lib/Target/R600/AMDGPUInstrInfo.td
> @@ -59,6 +59,17 @@
>    [SDNPCommutative, SDNPAssociative]
>  >;
>  
> +
> +def AMDGPUcvt_f32_ubyte0 : SDNode<"AMDGPUISD::CVT_F32_UBYTE0",
> +  SDTIntToFPOp, []>;
> +def AMDGPUcvt_f32_ubyte1 : SDNode<"AMDGPUISD::CVT_F32_UBYTE1",
> +  SDTIntToFPOp, []>;
> +def AMDGPUcvt_f32_ubyte2 : SDNode<"AMDGPUISD::CVT_F32_UBYTE2",
> +  SDTIntToFPOp, []>;
> +def AMDGPUcvt_f32_ubyte3 : SDNode<"AMDGPUISD::CVT_F32_UBYTE3",
> +  SDTIntToFPOp, []>;
> +
> +
>  // urecip - This operation is a helper for integer division, it returns the
>  // result of 1 / a as a fractional unsigned integer.
>  // out = (2^32 / a) + e
> Index: lib/Target/R600/SIISelLowering.cpp
> ===================================================================
> --- lib/Target/R600/SIISelLowering.cpp
> +++ lib/Target/R600/SIISelLowering.cpp
> @@ -24,6 +24,7 @@
>  #include "llvm/CodeGen/MachineRegisterInfo.h"
>  #include "llvm/CodeGen/SelectionDAG.h"
>  #include "llvm/IR/Function.h"
> +#include "llvm/ADT/SmallString.h"
>  
>  using namespace llvm;
>  
> @@ -236,6 +237,8 @@
>    setTargetDAGCombine(ISD::SELECT_CC);
>    setTargetDAGCombine(ISD::SETCC);
>  
> +  setTargetDAGCombine(ISD::UINT_TO_FP);
> +
>    setSchedulingPreference(Sched::RegPressure);
>  }
>  
> @@ -1046,6 +1049,96 @@
>  // Custom DAG optimizations
>  //===----------------------------------------------------------------------===//
>  
> +SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
> +                                                     DAGCombinerInfo &DCI) {
> +  EVT VT = N->getValueType(0);
> +  EVT ScalarVT = VT.getScalarType();
> +  if (ScalarVT != MVT::f32)
> +    return SDValue();
> +
> +  SelectionDAG &DAG = DCI.DAG;
> +  SDLoc DL(N);
> +
> +  SDValue Src = N->getOperand(0);
> +  EVT SrcVT = Src.getValueType();
> +
> +  // TODO: We could try to match extracting the higher bytes, which would be
> +  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
> +  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
> +  // about in practice.
> +  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
> +    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
> +      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
> +      DCI.AddToWorklist(Cvt.getNode());
> +      return Cvt;
> +    }
> +  }
> +
> +  // We are primarily trying to catch operations on illegal vector types
> +  // before they are expanded.
> +  // For scalars, we can use the more flexible method of checking masked bits
> +  // after legalization.
> +  if (!DCI.isBeforeLegalize() ||
> +      !SrcVT.isVector() ||
> +      SrcVT.getVectorElementType() != MVT::i8) {
> +    return SDValue();
> +  }
> +
> +  assert(DCI.isBeforeLegalize() && "Unexpected legal type");
> +
> +  // Weird sized vectors are a pain to handle, but we know 3 is really the same
> +  // size as 4.
> +  unsigned NElts = SrcVT.getVectorNumElements();
> +  if (!SrcVT.isSimple() && NElts != 3)
> +    return SDValue();
> +
> +  // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to
> +  // prevent a mess from expanding to v4i32 and repacking.
> +  if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
> +    EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
> +    EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
> +    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
> +
> +    LoadSDNode *Load = cast<LoadSDNode>(Src);
> +    SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
> +                                     Load->getChain(),
> +                                     Load->getBasePtr(),
> +                                     LoadVT,
> +                                     Load->getMemOperand());
> +
> +    // Make sure successors of the original load stay after it by updating
> +    // them to use the new Chain.
> +    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));
> +
> +    SmallVector<SDValue, 4> Elts;
> +    if (RegVT.isVector())
> +      DAG.ExtractVectorElements(NewLoad, Elts);
> +    else
> +      Elts.push_back(NewLoad);
> +
> +    SmallVector<SDValue, 4> Ops;
> +
> +    unsigned EltIdx = 0;
> +    for (SDValue Elt : Elts) {
> +      unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
> +      for (unsigned I = 0; I < ComponentsInElt; ++I) {
> +        unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
> +        SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
> +        DCI.AddToWorklist(Cvt.getNode());
> +        Ops.push_back(Cvt);
> +      }
> +
> +      ++EltIdx;
> +    }
> +
> +    assert(Ops.size() == NElts);
> +
> +    return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
> +  }
> +
> +  return SDValue();
> +}
> +
>  SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
>                                              DAGCombinerInfo &DCI) const {
>    SelectionDAG &DAG = DCI.DAG;
> @@ -1087,6 +1180,31 @@
>        }
>        break;
>      }
> +
> +  case AMDGPUISD::CVT_F32_UBYTE0:
> +  case AMDGPUISD::CVT_F32_UBYTE1:
> +  case AMDGPUISD::CVT_F32_UBYTE2:
> +  case AMDGPUISD::CVT_F32_UBYTE3: {
> +    unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
> +
> +    SDValue Src = N->getOperand(0);
> +    APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
> +
> +    APInt KnownZero, KnownOne;
> +    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
> +                                          !DCI.isBeforeLegalizeOps());
> +    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
> +    if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
> +        TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
> +      DCI.CommitTargetLoweringOpt(TLO);
> +    }
> +
> +    break;
> +  }
> +
> +  case ISD::UINT_TO_FP: {
> +    return performUCharToFloatCombine(N, DCI);
> +  }
>    }
>  
>    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
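
A side note on the CVT_F32_UBYTE* demanded-bits handling above: each node
reads exactly one byte lane of its 32-bit source, which is what
APInt::getBitsSet(32, 8*Offset, 8*Offset + 8) expresses. A standalone C++
sketch of the resulting masks (illustration only, not part of the patch):

  #include <cstdio>

  int main() {
    for (unsigned Offset = 0; Offset < 4; ++Offset) {
      // Same byte lane APInt::getBitsSet(32, 8*Offset, 8*Offset + 8) selects.
      unsigned Demanded = 0xFFu << (8 * Offset);
      std::printf("CVT_F32_UBYTE%u demands mask 0x%08X\n", Offset, Demanded);
    }
    return 0;
  }

This is also why, e.g., the 'and i32 %add, 255' in the i8_zext_inreg_i32_to_f32
test below can be folded away once the conversion selects to V_CVT_F32_UBYTE0.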
> Index: lib/Target/R600/SIISelLowering.h
> ===================================================================
> --- lib/Target/R600/SIISelLowering.h
> +++ lib/Target/R600/SIISelLowering.h
> @@ -46,6 +46,9 @@
>    void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
>    MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const;
>  
> +  static SDValue performUCharToFloatCombine(SDNode *N,
> +                                            DAGCombinerInfo &DCI);
> +
>  public:
>    SITargetLowering(TargetMachine &tm);
>    bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
> Index: lib/Target/R600/SIInstructions.td
> ===================================================================
> --- lib/Target/R600/SIInstructions.td
> +++ lib/Target/R600/SIInstructions.td
> @@ -973,10 +973,18 @@
>  defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32",
>    [(set f64:$dst, (fextend f32:$src0))]
>  >;
> -//defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", []>;
> -//defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", []>;
> -//defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", []>;
> -//defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3", []>;
> +defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0",
> +  [(set f32:$dst, (AMDGPUcvt_f32_ubyte0 i32:$src0))]
> +>;
> +defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1",
> +  [(set f32:$dst, (AMDGPUcvt_f32_ubyte1 i32:$src0))]
> +>;
> +defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2",
> +  [(set f32:$dst, (AMDGPUcvt_f32_ubyte2 i32:$src0))]
> +>;
> +defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3",
> +  [(set f32:$dst, (AMDGPUcvt_f32_ubyte3 i32:$src0))]
> +>;
>  defm V_CVT_U32_F64 : VOP1_32_64 <0x00000015, "V_CVT_U32_F64",
>    [(set i32:$dst, (fp_to_uint f64:$src0))]
>  >;
> Index: test/CodeGen/R600/bitcast.ll
> ===================================================================
> --- test/CodeGen/R600/bitcast.ll
> +++ test/CodeGen/R600/bitcast.ll
> @@ -1,4 +1,4 @@
> -; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
> +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck %s
>  
>  ; This test just checks that the compiler doesn't crash.
>  ; CHECK-LABEL: @v32i8_to_v8i32
> @@ -28,3 +28,17 @@
>    store <16 x i8> %1, <16 x i8> addrspace(1)* %out
>    ret void
>  }
> +
> +define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
> +  %load = load <4 x i8> addrspace(1)* %in, align 4
> +  %bc = bitcast <4 x i8> %load to i32
> +  store i32 %bc, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
> +  %load = load i32 addrspace(1)* %in, align 4
> +  %bc = bitcast i32 %load to <4 x i8>
> +  store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
> +  ret void
> +}
> Index: test/CodeGen/R600/cvt_f32_ubyte.ll
> ===================================================================
> --- /dev/null
> +++ test/CodeGen/R600/cvt_f32_ubyte.ll
> @@ -0,0 +1,155 @@
> +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
> +
> +; SI-LABEL: @load_i8_to_f32:
> +; SI: BUFFER_LOAD_UBYTE [[LOADREG:v[0-9]+]],
> +; SI-NOT: BFE
> +; SI-NOT: LSHR
> +; SI: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
> +; SI: BUFFER_STORE_DWORD [[CONV]],
> +define void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
> +  %load = load i8 addrspace(1)* %in, align 1
> +  %cvt = uitofp i8 %load to float
> +  store float %cvt, float addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @load_v2i8_to_v2f32:
> +; SI: BUFFER_LOAD_USHORT [[LOADREG:v[0-9]+]],
> +; SI-NOT: BFE
> +; SI-NOT: LSHR
> +; SI-NOT: AND
> +; SI-DAG: V_CVT_F32_UBYTE1_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
> +; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
> +; SI: BUFFER_STORE_DWORDX2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
> +define void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <2 x i8> addrspace(1)* %in, align 1
> +  %cvt = uitofp <2 x i8> %load to <2 x float>
> +  store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
> +
> +; SI-LABEL: @load_v3i8_to_v3f32:
> +; SI-NOT: BFE
> +; SI-NOT: V_CVT_F32_UBYTE3_e32
> +; SI-DAG: V_CVT_F32_UBYTE2_e32
> +; SI-DAG: V_CVT_F32_UBYTE1_e32
> +; SI-DAG: V_CVT_F32_UBYTE0_e32
> +; SI: BUFFER_STORE_DWORDX2 v{{\[[0-9]+:[0-9]+\]}},
> +define void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <3 x i8> addrspace(1)* %in, align 1
> +  %cvt = uitofp <3 x i8> %load to <3 x float>
> +  store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
> +
> +; SI-LABEL: @load_v4i8_to_v4f32:
> +; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
> +; SI-NOT: BFE
> +; SI-NOT: LSHR
> +; SI-DAG: V_CVT_F32_UBYTE3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
> +; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, [[LOADREG]]
> +; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, [[LOADREG]]
> +; SI-DAG: V_CVT_F32_UBYTE0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
> +; SI: BUFFER_STORE_DWORDX4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
> +define void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <4 x i8> addrspace(1)* %in, align 1
> +  %cvt = uitofp <4 x i8> %load to <4 x float>
> +  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
> +
> +; XXX - This should really still be able to use V_CVT_F32_UBYTE0 for
> +; each component, but computeKnownBits doesn't handle vectors very
> +; well.
> +
> +; SI-LABEL: @load_v4i8_to_v4f32_2_uses:
> +; SI: BUFFER_LOAD_DWORD
> +; SI: V_CVT_F32_U32_e32
> +; SI: V_CVT_F32_U32_e32
> +; SI: V_CVT_F32_U32_e32
> +; SI: V_CVT_F32_U32_e32
> +; SI: S_ENDPGM
> +define void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <4 x i8> addrspace(1)* %in, align 4
> +  %cvt = uitofp <4 x i8> %load to <4 x float>
> +  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
> +  %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
> +  store <4 x i8> %add, <4 x i8> addrspace(1)* %out2, align 4
> +  ret void
> +}
> +
> +; Make sure this doesn't crash.
> +; SI-LABEL: @load_v7i8_to_v7f32:
> +; SI: S_ENDPGM
> +define void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <7 x i8> addrspace(1)* %in, align 1
> +  %cvt = uitofp <7 x i8> %load to <7 x float>
> +  store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
> +
> +; SI-LABEL: @load_v8i8_to_v8f32:
> +; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
> +; SI-NOT: BFE
> +; SI-NOT: LSHR
> +; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[LOLOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[LOLOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[LOLOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[LOLOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE3_e32 v{{[0-9]+}}, v[[HILOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE2_e32 v{{[0-9]+}}, v[[HILOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE1_e32 v{{[0-9]+}}, v[[HILOAD]]
> +; SI-DAG: V_CVT_F32_UBYTE0_e32 v{{[0-9]+}}, v[[HILOAD]]
> +; SI-NOT: BFE
> +; SI-NOT: LSHR
> +; SI: BUFFER_STORE_DWORDX4
> +; SI: BUFFER_STORE_DWORDX4
> +define void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <8 x i8> addrspace(1)* %in, align 1
> +  %cvt = uitofp <8 x i8> %load to <8 x float>
> +  store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
> +
> +; SI-LABEL: @i8_zext_inreg_i32_to_f32:
> +; SI: BUFFER_LOAD_DWORD [[LOADREG:v[0-9]+]],
> +; SI: V_ADD_I32_e32 [[ADD:v[0-9]+]], 2, [[LOADREG]]
> +; SI-NEXT: V_CVT_F32_UBYTE0_e32 [[CONV:v[0-9]+]], [[ADD]]
> +; SI: BUFFER_STORE_DWORD [[CONV]],
> +define void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
> +  %load = load i32 addrspace(1)* %in, align 4
> +  %add = add i32 %load, 2
> +  %inreg = and i32 %add, 255
> +  %cvt = uitofp i32 %inreg to float
> +  store float %cvt, float addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @i8_zext_inreg_hi1_to_f32:
> +define void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
> +  %load = load i32 addrspace(1)* %in, align 4
> +  %inreg = and i32 %load, 65280
> +  %shr = lshr i32 %inreg, 8
> +  %cvt = uitofp i32 %shr to float
> +  store float %cvt, float addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +
> +; We don't catch these cases because of the zext, but instcombine
> +; removes the zexts, so it shouldn't really matter.
> +define void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
> +  %load = load i8 addrspace(1)* %in, align 1
> +  %ext = zext i8 %load to i32
> +  %cvt = uitofp i32 %ext to float
> +  store float %cvt, float addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +define void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
> +  %load = load <4 x i8> addrspace(1)* %in, align 1
> +  %ext = zext <4 x i8> %load to <4 x i32>
> +  %cvt = uitofp <4 x i32> %ext to <4 x float>
> +  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
> +  ret void
> +}
