[PATCH] Make SimplifyDemandedBits understand BUILD_PAIR

Tom Stellard tom at stellard.net
Mon May 12 06:25:28 PDT 2014


On Sun, May 11, 2014 at 11:33:55PM +0000, Matt Arsenault wrote:
> Replaced the testcase with one that still works. Select used to be lowered with a build_pair, but it no longer is, so the test now uses a case that still produces one.
>

LGTM.
 
> http://reviews.llvm.org/D3204
> 
> Files:
>   lib/CodeGen/SelectionDAG/TargetLowering.cpp
>   test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
> 
> Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
> ===================================================================
> --- lib/CodeGen/SelectionDAG/TargetLowering.cpp
> +++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
> @@ -854,6 +854,31 @@
>      }
>      break;
>    }
> +  case ISD::BUILD_PAIR: {
> +    EVT HalfVT = Op.getOperand(0).getValueType();
> +    unsigned HalfBitWidth = HalfVT.getScalarSizeInBits();
> +
> +    APInt MaskLo = NewMask.getLoBits(HalfBitWidth).trunc(HalfBitWidth);
> +    APInt MaskHi = NewMask.getHiBits(HalfBitWidth).trunc(HalfBitWidth);
> +
> +    APInt KnownZeroLo, KnownOneLo;
> +    APInt KnownZeroHi, KnownOneHi;
> +
> +    if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownZeroLo,
> +                             KnownOneLo, TLO, Depth + 1))
> +      return true;
> +
> +    if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownZeroHi,
> +                             KnownOneHi, TLO, Depth + 1))
> +      return true;
> +
> +    KnownZero = KnownZeroLo.zext(BitWidth) |
> +                KnownZeroHi.zext(BitWidth).shl(HalfBitWidth);
> +
> +    KnownOne = KnownOneLo.zext(BitWidth) |
> +               KnownOneHi.zext(BitWidth).shl(HalfBitWidth);
> +    break;
> +  }
>    case ISD::ZERO_EXTEND: {
>      unsigned OperandBitWidth =
>        Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
> Index: test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
> ===================================================================
> --- /dev/null
> +++ test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
> @@ -0,0 +1,37 @@
> +; RUN: llc -verify-machineinstrs -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
> +
> +; 64-bit select was originally lowered with a build_pair, and this
> +; could be simplified to 1 cndmask instead of 2, but that broke when
> +; it started being implemented with a v2i32 build_vector and
> +; bitcasting.
> +define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
> +  %cmp = icmp eq i32 %c, 0
> +  %select = select i1 %cmp, i64 %a, i64 %b
> +  %trunc = trunc i64 %select to i32
> +  store i32 %trunc, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +
> +; SI-LABEL: @trunc_load_alloca_i64
> +; SI: V_CNDMASK_B32
> +; SI-NOT: V_CNDMASK_B32
> +; SI: S_ENDPGM
> +define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
> +  %idx = add i32 %a, %b
> +  %alloca = alloca i64, i32 4
> +  %gep0 = getelementptr i64* %alloca, i64 0
> +  %gep1 = getelementptr i64* %alloca, i64 1
> +  %gep2 = getelementptr i64* %alloca, i64 2
> +  %gep3 = getelementptr i64* %alloca, i64 3
> +  store i64 24, i64* %gep0, align 8
> +  store i64 9334, i64* %gep1, align 8
> +  store i64 3935, i64* %gep2, align 8
> +  store i64 9342, i64* %gep3, align 8
> +  %gep = getelementptr i64* %alloca, i32 %idx
> +  %load = load i64* %gep, align 8
> +  %mask = and i64 %load, 4294967296
> +  %add = add i64 %mask, -1
> +  store i64 %add, i64 addrspace(1)* %out, align 4
> +  ret void
> +}

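For context, here is a minimal standalone sketch (editor's illustration, not LLVM code) of what the BUILD_PAIR case above does: split the demanded mask for the 64-bit result into per-half 32-bit masks, then recombine the per-half known bits. The demanded mask of only bit 32 matches the test's `and i64 %load, 4294967296`; the plain uint32_t/uint64_t values stand in for APInt, and the per-half known-bits results are made up.

  #include <cstdint>
  #include <cstdio>

  struct KnownBits32 {
    uint32_t Zero; // bits known to be 0
    uint32_t One;  // bits known to be 1
  };

  int main() {
    // Hypothetical demanded mask: only bit 32 of the i64 result is wanted.
    uint64_t DemandedMask = 1ull << 32;

    // Split into masks for the low (operand 0) and high (operand 1) halves,
    // as the patch does with getLoBits/getHiBits + trunc.
    uint32_t MaskLo = static_cast<uint32_t>(DemandedMask);
    uint32_t MaskHi = static_cast<uint32_t>(DemandedMask >> 32);

    // MaskLo == 0: nothing in the low operand is demanded, so the recursive
    // SimplifyDemandedBits call is free to simplify it away entirely.
    // MaskHi == 1: only bit 0 of the high operand matters.
    printf("MaskLo = 0x%08x, MaskHi = 0x%08x\n", MaskLo, MaskHi);

    // Pretend the recursive calls reported these known bits for each half.
    KnownBits32 Lo = {0xFFFFFFFFu, 0x0u};  // low half known all-zero
    KnownBits32 Hi = {0xFFFFFFFEu, 0x1u};  // high half: bit 0 known one

    // Recombine exactly as the patch does: zero-extend the low half, and
    // zero-extend + shift the high half up by the half bit width.
    uint64_t KnownZero = static_cast<uint64_t>(Lo.Zero) |
                         (static_cast<uint64_t>(Hi.Zero) << 32);
    uint64_t KnownOne = static_cast<uint64_t>(Lo.One) |
                        (static_cast<uint64_t>(Hi.One) << 32);
    printf("KnownZero = 0x%016llx, KnownOne = 0x%016llx\n",
           (unsigned long long)KnownZero, (unsigned long long)KnownOne);
    return 0;
  }

With only the high half demanded, the low operand of the pair needs no cndmask at all, which is why the test expects a single V_CNDMASK_B32.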
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits