[llvm] r279902 - AMDGPU: Select mulhi 24-bit instructions

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 26 18:32:28 PDT 2016


Author: arsenm
Date: Fri Aug 26 20:32:27 2016
New Revision: 279902

URL: http://llvm.org/viewvc/llvm-project?rev=279902&view=rev
Log:
AMDGPU: Select mulhi 24-bit instructions
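
The new MULHI_U24 / MULHI_I24 nodes compute the high 32 bits of the (at most
48-bit) product of two 24-bit operands; the top 8 bits of each input are
ignored. A minimal standalone C++ sketch of that semantics follows - the
helper names (zext24, sext24, mulhi_*_ref) are illustrative only and are not
part of the patch:

#include <cstdint>

// Treat only the low 24 bits of each operand as significant.
static uint64_t zext24(uint32_t X) { return X & 0xffffffu; }
static int64_t sext24(uint32_t X) {
  uint32_t Lo = X & 0xffffffu;
  if (Lo & 0x800000u)
    Lo |= 0xff000000u;            // propagate bit 23 into the top byte
  return (int64_t)(int32_t)Lo;
}

// High 32 bits of the 48-bit product, matching what v_mul_hi_u32_u24 /
// v_mul_hi_i32_i24 and the r600 MULHI_*24 opcodes are expected to return.
static uint32_t mulhi_u24_ref(uint32_t A, uint32_t B) {
  return (uint32_t)((zext24(A) * zext24(B)) >> 32);
}
static int32_t mulhi_i24_ref(uint32_t A, uint32_t B) {
  int64_t Prod = sext24(A) * sext24(B);          // fits in 48 bits
  return (int32_t)(uint32_t)((uint64_t)Prod >> 32);
}

The MULHS/MULHU combines in the diff below rewrite high multiplies whose
operands are provably 24-bit into these nodes so they select to the
dedicated instructions.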

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td
    llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td
    llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td
    llvm/trunk/lib/Target/AMDGPU/R600Instructions.td
    llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
    llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Fri Aug 26 20:32:27 2016
@@ -467,6 +467,8 @@ AMDGPUTargetLowering::AMDGPUTargetLoweri
   setTargetDAGCombine(ISD::SRA);
   setTargetDAGCombine(ISD::SRL);
   setTargetDAGCombine(ISD::MUL);
+  setTargetDAGCombine(ISD::MULHU);
+  setTargetDAGCombine(ISD::MULHS);
   setTargetDAGCombine(ISD::SELECT);
   setTargetDAGCombine(ISD::SELECT_CC);
   setTargetDAGCombine(ISD::STORE);
@@ -1985,7 +1987,7 @@ static bool isI24(SDValue Op, SelectionD
          (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
 }
 
-static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
+static bool simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
 
   SelectionDAG &DAG = DCI.DAG;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -1994,8 +1996,12 @@ static void simplifyI24(SDValue Op, Targ
   APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
   APInt KnownZero, KnownOne;
   TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
-  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
+  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
     DCI.CommitTargetLoweringOpt(TLO);
+    return true;
+  }
+
+  return false;
 }
 
 template <typename IntTy>
@@ -2285,11 +2291,36 @@ SDValue AMDGPUTargetLowering::performSrl
   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
 }
 
+// We need to specifically handle i64 mul here to avoid unnecessary conversion
+// instructions. If we only match on the legalized i64 mul expansion,
+// SimplifyDemandedBits will be unable to remove them because there will be
+// multiple uses due to the separate mul + mulh[su].
+static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
+                        SDValue N0, SDValue N1, unsigned Size, bool Signed) {
+  if (Size <= 32) {
+    unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
+    return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
+  }
+
+  // Because we want to eliminate extension instructions before the
+  // operation, we need to create a single user here (i.e. not the separate
+  // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
+
+  unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
+
+  SDValue Mul = DAG.getNode(MulOpc, SL,
+                            DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
+
+  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
+                     Mul.getValue(0), Mul.getValue(1));
+}
+
 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
 
-  if (VT.isVector() || VT.getSizeInBits() > 32)
+  unsigned Size = VT.getSizeInBits();
+  if (VT.isVector() || Size > 64)
     return SDValue();
 
   SelectionDAG &DAG = DCI.DAG;
@@ -2302,11 +2333,11 @@ SDValue AMDGPUTargetLowering::performMul
   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
-    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
+    Mul = getMul24(DAG, DL, N0, N1, Size, false);
   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
-    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
+    Mul = getMul24(DAG, DL, N0, N1, Size, true);
   } else {
     return SDValue();
   }
@@ -2316,6 +2347,77 @@ SDValue AMDGPUTargetLowering::performMul
   return DAG.getSExtOrTrunc(Mul, DL, VT);
 }
 
+SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
+                                                  DAGCombinerInfo &DCI) const {
+  EVT VT = N->getValueType(0);
+
+  if (!Subtarget->hasMulI24() || VT.isVector())
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc DL(N);
+
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  if (!isI24(N0, DAG) || !isI24(N1, DAG))
+    return SDValue();
+
+  N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
+  N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
+
+  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
+  DCI.AddToWorklist(Mulhi.getNode());
+  return DAG.getSExtOrTrunc(Mulhi, DL, VT);
+}
+
+SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
+                                                  DAGCombinerInfo &DCI) const {
+  EVT VT = N->getValueType(0);
+
+  if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc DL(N);
+
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  if (!isU24(N0, DAG) || !isU24(N1, DAG))
+    return SDValue();
+
+  N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
+  N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
+
+  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
+  DCI.AddToWorklist(Mulhi.getNode());
+  return DAG.getZExtOrTrunc(Mulhi, DL, VT);
+}
+
+SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
+  SDNode *N, DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  // Simplify demanded bits before splitting into multiple users.
+  if (simplifyI24(N0, DCI) || simplifyI24(N1, DCI))
+    return SDValue();
+
+  bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
+
+  unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
+  unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
+
+  SDLoc SL(N);
+
+  SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
+  SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
+  return DAG.getMergeValues({ MulLo, MulHi }, SL);
+}
+
 static bool isNegativeOne(SDValue Val) {
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
     return C->isAllOnesValue();
@@ -2476,14 +2578,23 @@ SDValue AMDGPUTargetLowering::PerformDAG
   }
   case ISD::MUL:
     return performMulCombine(N, DCI);
+  case ISD::MULHS:
+    return performMulhsCombine(N, DCI);
+  case ISD::MULHU:
+    return performMulhuCombine(N, DCI);
   case AMDGPUISD::MUL_I24:
-  case AMDGPUISD::MUL_U24: {
+  case AMDGPUISD::MUL_U24:
+  case AMDGPUISD::MULHI_I24:
+  case AMDGPUISD::MULHI_U24: {
     SDValue N0 = N->getOperand(0);
     SDValue N1 = N->getOperand(1);
     simplifyI24(N0, DCI);
     simplifyI24(N1, DCI);
     return SDValue();
   }
+  case AMDGPUISD::MUL_LOHI_I24:
+  case AMDGPUISD::MUL_LOHI_U24:
+    return performMulLoHi24Combine(N, DCI);
   case ISD::SELECT:
     return performSelectCombine(N, DCI);
   case AMDGPUISD::BFE_I32:
@@ -2695,6 +2806,10 @@ const char* AMDGPUTargetLowering::getTar
   NODE_NAME_CASE(FFBH_I32)
   NODE_NAME_CASE(MUL_U24)
   NODE_NAME_CASE(MUL_I24)
+  NODE_NAME_CASE(MULHI_U24)
+  NODE_NAME_CASE(MULHI_I24)
+  NODE_NAME_CASE(MUL_LOHI_U24)
+  NODE_NAME_CASE(MUL_LOHI_I24)
   NODE_NAME_CASE(MAD_U24)
   NODE_NAME_CASE(MAD_I24)
   NODE_NAME_CASE(TEXTURE_FETCH)
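
A note on the i64 path above: getMul24 emits a single MUL_LOHI_[IU]24 node so
that simplifyI24 sees one user of the 24-bit operands, and
performMulLoHi24Combine later splits it into the MUL_*24 / MULHI_*24 pair
that BUILD_PAIR reassembles into the i64 result. A rough standalone C++
sketch of the arithmetic being modelled (not the DAG code; names are
illustrative):

#include <cstdint>
#include <utility>

// Unsigned flavour of the lo/hi split: one 24x24 multiply yields the two
// 32-bit halves of the 64-bit product.
static std::pair<uint32_t, uint32_t> mul_lohi_u24_ref(uint32_t A, uint32_t B) {
  uint64_t Prod = (uint64_t)(A & 0xffffffu) * (B & 0xffffffu);
  return {(uint32_t)Prod, (uint32_t)(Prod >> 32)};
}

// Recombining the halves (the BUILD_PAIR in getMul24) reproduces the full
// 64-bit multiply of the 24-bit operands.
static uint64_t mul_u24_i64_ref(uint32_t A, uint32_t B) {
  std::pair<uint32_t, uint32_t> P = mul_lohi_u24_ref(A, B);
  return (uint64_t)P.first | ((uint64_t)P.second << 32);
}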

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h Fri Aug 26 20:32:27 2016
@@ -70,6 +70,9 @@ protected:
   SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue performMulLoHi24Combine(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue performCtlzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                              SDValue RHS, DAGCombinerInfo &DCI) const;
   SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -226,9 +229,9 @@ enum NodeType : unsigned {
   DWORDADDR,
   FRACT,
   CLAMP,
-  // This is SETCC with the full mask result which is used for a compare with a 
+  // This is SETCC with the full mask result which is used for a compare with a
   // result bit per item in the wavefront.
-  SETCC,    
+  SETCC,
 
   // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
   // Denormals handled on some parts.
@@ -272,8 +275,12 @@ enum NodeType : unsigned {
   FFBH_I32,
   MUL_U24,
   MUL_I24,
+  MULHI_U24,
+  MULHI_I24,
   MAD_U24,
   MAD_I24,
+  MUL_LOHI_I24,
+  MUL_LOHI_U24,
   TEXTURE_FETCH,
   EXPORT,
   CONST_ADDRESS,

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td Fri Aug 26 20:32:27 2016
@@ -216,13 +216,20 @@ def AMDGPUbfm : SDNode<"AMDGPUISD::BFM",
 def AMDGPUffbh_u32 : SDNode<"AMDGPUISD::FFBH_U32", SDTIntUnaryOp>;
 def AMDGPUffbh_i32 : SDNode<"AMDGPUISD::FFBH_I32", SDTIntUnaryOp>;
 
-// Signed and unsigned 24-bit mulitply.  The highest 8-bits are ignore when
-// performing the mulitply.  The result is a 32-bit value.
+// Signed and unsigned 24-bit multiply. The highest 8 bits are ignored
+// when performing the multiply. The result is a 32-bit value.
 def AMDGPUmul_u24 : SDNode<"AMDGPUISD::MUL_U24", SDTIntBinOp,
-  [SDNPCommutative]
+  [SDNPCommutative, SDNPAssociative]
 >;
 def AMDGPUmul_i24 : SDNode<"AMDGPUISD::MUL_I24", SDTIntBinOp,
-  [SDNPCommutative]
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+def AMDGPUmulhi_u24 : SDNode<"AMDGPUISD::MULHI_U24", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+def AMDGPUmulhi_i24 : SDNode<"AMDGPUISD::MULHI_I24", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
 >;
 
 def AMDGPUmad_u24 : SDNode<"AMDGPUISD::MAD_U24", AMDGPUDTIntTernaryOp,

Modified: llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td Fri Aug 26 20:32:27 2016
@@ -37,6 +37,9 @@ def MULLO_INT_cm : MULLO_INT_Common<0x8F
 def MULHI_INT_cm : MULHI_INT_Common<0x90>;
 def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
 def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
+def MULHI_INT_cm24 : MULHI_INT24_Common<0x5c>;
+def MULHI_UINT_cm24 : MULHI_UINT24_Common<0xb2>;
+
 def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
 def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
 def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;

Modified: llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td Fri Aug 26 20:32:27 2016
@@ -72,6 +72,8 @@ def MULLO_INT_eg : MULLO_INT_Common<0x8F
 def MULHI_INT_eg : MULHI_INT_Common<0x90>;
 def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
 def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
+def MULHI_UINT24_eg : MULHI_UINT24_Common<0xb2>;
+
 def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
 def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
 def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;

Modified: llvm/trunk/lib/Target/AMDGPU/R600Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Instructions.td?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600Instructions.td Fri Aug 26 20:32:27 2016
@@ -1073,18 +1073,27 @@ class LSHL_Common <bits<11> inst> : R600
 class LSHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "LSHR", srl>;
 class ASHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "ASHR", sra>;
 class MULHI_INT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULHI_INT", mulhs
-> {
+  inst, "MULHI_INT", mulhs> {
   let Itinerary = TransALU;
 }
+
+class MULHI_INT24_Common <bits<11> inst> : R600_2OP_Helper <
+  inst, "MULHI_INT24", AMDGPUmulhi_i24> {
+  let Itinerary = VecALU;
+}
+
 class MULHI_UINT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULHI", mulhu
-> {
+  inst, "MULHI", mulhu> {
   let Itinerary = TransALU;
 }
+
+class MULHI_UINT24_Common <bits<11> inst> : R600_2OP_Helper <
+  inst, "MULHI_UINT24", AMDGPUmulhi_u24> {
+  let Itinerary = VecALU;
+}
+
 class MULLO_INT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULLO_INT", mul
-> {
+  inst, "MULLO_INT", mul> {
   let Itinerary = TransALU;
 }
 class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []> {

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Fri Aug 26 20:32:27 2016
@@ -1362,7 +1362,7 @@ defm V_MUL_I32_I24 : VOP2Inst <vop2<0x9,
 >;
 
 defm V_MUL_HI_I32_I24 : VOP2Inst <vop2<0xa,0x7>, "v_mul_hi_i32_i24",
-  VOP_I32_I32_I32
+  VOP_I32_I32_I32, AMDGPUmulhi_i24
 >;
 
 defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb, 0x8>, "v_mul_u32_u24",
@@ -1370,7 +1370,7 @@ defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb,
 >;
 
 defm V_MUL_HI_U32_U24 : VOP2Inst <vop2<0xc,0x9>, "v_mul_hi_u32_u24",
- VOP_I32_I32_I32
+ VOP_I32_I32_I32, AMDGPUmulhi_u24
 >;
 
 defm V_MIN_F32 : VOP2Inst <vop2<0xf, 0xa>, "v_min_f32", VOP_F32_F32_F32,

Modified: llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll Fri Aug 26 20:32:27 2016
@@ -1,23 +1,151 @@
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}test_smul24_i32:
+; GCN-NOT: bfe
+; GCN: v_mul_i32_i24
 
-; FUNC-LABEL: {{^}}i32_mul24:
 ; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
 ; EG: MULLO_INT
+
 ; Make sure we are not masking the inputs
 ; CM-NOT: AND
 ; CM: MUL_INT24
-; SI-NOT: and
-; SI: v_mul_i32_i24
-define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define void @test_smul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
-  %0 = shl i32 %a, 8
-  %a_24 = ashr i32 %0, 8
-  %1 = shl i32 %b, 8
-  %b_24 = ashr i32 %1, 8
-  %2 = mul i32 %a_24, %b_24
-  store i32 %2, i32 addrspace(1)* %out
+  %a.shl = shl i32 %a, 8
+  %a.24 = ashr i32 %a.shl, 8
+  %b.shl = shl i32 %b, 8
+  %b.24 = ashr i32 %b.shl, 8
+  %mul24 = mul i32 %a.24, %b.24
+  store i32 %mul24, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_smulhi24_i64:
+; GCN-NOT: bfe
+; GCN-NOT: ashr
+; GCN: v_mul_hi_i32_i24_e32 [[RESULT:v[0-9]+]],
+; GCN-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: ASHR
+; EG: ASHR
+; EG: MULHI_INT
+
+; CM-NOT: ASHR
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+define void @test_smulhi24_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+entry:
+  %a.shl = shl i32 %a, 8
+  %a.24 = ashr i32 %a.shl, 8
+  %b.shl = shl i32 %b, 8
+  %b.24 = ashr i32 %b.shl, 8
+  %a.24.i64 = sext i32 %a.24 to i64
+  %b.24.i64 = sext i32 %b.24 to i64
+  %mul48 = mul i64 %a.24.i64, %b.24.i64
+  %mul48.hi = lshr i64 %mul48, 32
+  %mul24hi = trunc i64 %mul48.hi to i32
+  store i32 %mul24hi, i32 addrspace(1)* %out
+  ret void
+}
+
+; This requires handling the original 64-bit mul node to eliminate
+; unnecessary extension instructions; after legalization they will not
+; be removed by SimplifyDemandedBits because there are multiple uses
+; by the separate mul and mulhi.
+
+; FUNC-LABEL: {{^}}test_smul24_i64:
+; GCN: s_load_dword s
+; GCN: s_load_dword s
+
+; GCN-NOT: bfe
+; GCN-NOT: ashr
+
+; GCN-DAG: v_mul_hi_i32_i24_e32
+; GCN-DAG: v_mul_i32_i24_e32
+
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+  %shl.i = shl i32 %a, 8
+  %shr.i = ashr i32 %shl.i, 8
+  %conv.i = sext i32 %shr.i to i64
+  %shl1.i = shl i32 %b, 8
+  %shr2.i = ashr i32 %shl1.i, 8
+  %conv3.i = sext i32 %shr2.i to i64
+  %mul.i = mul i64 %conv3.i, %conv.i
+  store i64 %mul.i, i64 addrspace(1)* %out
+  ret void
+}
+
+; FIXME: Should be able to eliminate bfe
+; FUNC-LABEL: {{^}}test_smul24_i64_square:
+; GCN: s_load_dword [[A:s[0-9]+]]
+; GCN: s_bfe_i32 [[SEXT:s[0-9]+]], [[A]], 0x180000{{$}}
+; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
+; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+  %shl.i = shl i32 %a, 8
+  %shr.i = ashr i32 %shl.i, 8
+  %conv.i = sext i32 %shr.i to i64
+  %mul.i = mul i64 %conv.i, %conv.i
+  store i64 %mul.i, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_smul24_i33:
+; GCN: s_load_dword s
+; GCN: s_load_dword s
+
+; GCN-NOT: and
+; GCN-NOT: lshr
+
+; GCN-DAG: v_mul_i32_i24_e32
+; GCN-DAG: v_mul_hi_i32_i24_e32
+; SI: v_lshl_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
+; SI: v_ashr_i64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
+
+; VI: v_lshlrev_b64 v{{\[[0-9]+:[0-9]+\]}}, 31, v{{\[[0-9]+:[0-9]+\]}}
+; VI: v_ashrrev_i64 v{{\[[0-9]+:[0-9]+\]}}, 31, v{{\[[0-9]+:[0-9]+\]}}
+
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) #0 {
+entry:
+  %a.shl = shl i33 %a, 9
+  %a.24 = ashr i33 %a.shl, 9
+  %b.shl = shl i33 %b, 9
+  %b.24 = ashr i33 %b.shl, 9
+  %mul24 = mul i33 %a.24, %b.24
+  %ext = sext i33 %mul24 to i64
+  store i64 %ext, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_smulhi24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: bfe
+
+; SI: v_mul_hi_i32_i24_e32 v[[MUL_HI:[0-9]+]],
+; SI-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI-NEXT: buffer_store_dword v[[HI]]
+define void @test_smulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+  %tmp0 = shl i33 %a, 9
+  %a_24 = ashr i33 %tmp0, 9
+  %tmp1 = shl i33 %b, 9
+  %b_24 = ashr i33 %tmp1, 9
+  %tmp2 = mul i33 %a_24, %b_24
+  %hi = lshr i33 %tmp2, 32
+  %trunc = trunc i33 %hi to i32
+
+  store i32 %trunc, i32 addrspace(1)* %out
   ret void
 }
+attributes #0 = { nounwind }
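
In test_smulhi24_i33 only bit 32 of the product survives the i33 multiply
followed by the logical shift by 32, which is why the expected SI codegen is
a single v_mul_hi_i32_i24 followed by an AND with 1. A hedged standalone C++
check of that equivalence (function names are illustrative, not from the
patch):

#include <cassert>
#include <cstdint>
#include <initializer_list>

static int64_t sext24(uint32_t X) {
  uint32_t Lo = X & 0xffffffu;
  if (Lo & 0x800000u)
    Lo |= 0xff000000u;                        // sign-extend bit 23
  return (int64_t)(int32_t)Lo;
}

// What the IR computes: multiply in i33, shift right by 32, truncate to i32.
static uint32_t smulhi24_i33_ref(uint32_t A, uint32_t B) {
  uint64_t Prod33 = (uint64_t)(sext24(A) * sext24(B)) & 0x1ffffffffull;
  return (uint32_t)(Prod33 >> 32);
}

// What the expected codegen computes: 24-bit mulhi, then AND with 1.
static uint32_t smulhi24_i33_codegen(uint32_t A, uint32_t B) {
  uint32_t Hi = (uint32_t)((uint64_t)(sext24(A) * sext24(B)) >> 32);
  return Hi & 1;
}

int main() {
  for (uint32_t A : {0u, 1u, 0x7fffffu, 0x800000u, 0xffffffu})
    for (uint32_t B : {0u, 1u, 0x7fffffu, 0x800000u, 0xffffffu})
      assert(smulhi24_i33_ref(A, B) == smulhi24_i33_codegen(A, B));
  return 0;
}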

Modified: llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll?rev=279902&r1=279901&r2=279902&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll Fri Aug 26 20:32:27 2016
@@ -1,13 +1,12 @@
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
-; FUNC-LABEL: {{^}}u32_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i32:
 ; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
 ; SI: v_mul_u32_u24
-
-define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
   %0 = shl i32 %a, 8
   %a_24 = lshr i32 %0, 8
@@ -18,46 +17,98 @@ entry:
   ret void
 }
 
-; FUNC-LABEL: {{^}}i16_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i16_sext:
 ; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
 ; EG: 16
+
 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; SI: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
-define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
-  %0 = mul i16 %a, %b
-  %1 = sext i16 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %mul = mul i16 %a, %b
+  %ext = sext i16 %mul to i32
+  store i32 %ext, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i8_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i16:
+; SI: s_and_b32
+; SI: v_mul_u32_u24_e32
+; SI: v_and_b32_e32
+define void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+entry:
+  %mul = mul i16 %a, %b
+  %ext = zext i16 %mul to i32
+  store i32 %ext, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umul24_i8:
 ; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
 ; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
 ; SI: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
 
-define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+define void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+entry:
+  %mul = mul i8 %a, %b
+  %ext = sext i8 %mul to i32
+  store i32 %ext, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
+; SI-NOT: and
+; SI: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
+; SI-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+define void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 entry:
-  %0 = mul i8 %a, %b
-  %1 = sext i8 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %a.24 = and i32 %a, 16777215
+  %b.24 = and i32 %b, 16777215
+  %a.24.i64 = zext i32 %a.24 to i64
+  %b.24.i64 = zext i32 %b.24 to i64
+  %mul48 = mul i64 %a.24.i64, %b.24.i64
+  %mul48.hi = lshr i64 %mul48, 32
+  %mul24hi = trunc i64 %mul48.hi to i32
+  store i32 %mul24hi, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24:
+; SI-NOT: and
+; SI: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
+; SI-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+define void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+entry:
+  %a.24 = and i64 %a, 16777215
+  %b.24 = and i64 %b, 16777215
+  %mul48 = mul i64 %a.24, %b.24
+  %mul48.hi = lshr i64 %mul48, 32
+  %mul24.hi = trunc i64 %mul48.hi to i32
+  store i32 %mul24.hi, i32 addrspace(1)* %out
   ret void
 }
 
 ; Multiply with 24-bit inputs and 64-bit output
-; FUNC_LABEL: {{^}}mul24_i64:
+; FUNC-LABEL: {{^}}test_umul24_i64:
 ; EG; MUL_UINT24
 ; EG: MULHI
-; FIXME: SI support 24-bit mulhi
 
-; SI-DAG: v_mul_u32_u24
-; SI-DAG: v_mul_hi_u32
-; SI: s_endpgm
-define void @mul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI-DAG: v_mul_u32_u24_e32
+; SI-DAG: v_mul_hi_u32_u24_e32
+
+; SI: buffer_store_dwordx2
+define void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
 entry:
   %tmp0 = shl i64 %a, 40
   %a_24 = lshr i64 %tmp0, 40
@@ -67,3 +118,80 @@ entry:
   store i64 %tmp2, i64 addrspace(1)* %out
   ret void
 }
+
+; FIXME: Should be able to eliminate the and
+; FUNC-LABEL: {{^}}test_umul24_i64_square:
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_and_b32 [[TRUNC:s[0-9]+]], [[A]], 0xffffff{{$}}
+; SI-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
+; SI-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
+define void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
+entry:
+  %tmp0 = shl i64 %a, 40
+  %a.24 = lshr i64 %tmp0, 40
+  %tmp2 = mul i64 %a.24, %a.24
+  store i64 %tmp2, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi16_i32:
+; SI: s_and_b32
+; SI: s_and_b32
+; SI: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
+; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
+define void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  %a.16 = and i32 %a, 65535
+  %b.16 = and i32 %b, 65535
+  %mul = mul i32 %a.16, %b.16
+  %hi = lshr i32 %mul, 16
+  %mulhi = trunc i32 %hi to i16
+  store i16 %mulhi, i16 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umul24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI-DAG: v_mul_u32_u24_e32 v[[MUL_LO:[0-9]+]],
+; SI-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
+; SI-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
+define void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+  %tmp0 = shl i33 %a, 9
+  %a_24 = lshr i33 %tmp0, 9
+  %tmp1 = shl i33 %b, 9
+  %b_24 = lshr i33 %tmp1, 9
+  %tmp2 = mul i33 %a_24, %b_24
+  %ext = zext i33 %tmp2 to i64
+  store i64 %ext, i64 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
+; SI-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI-NEXT: buffer_store_dword v[[HI]]
+define void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+  %tmp0 = shl i33 %a, 9
+  %a_24 = lshr i33 %tmp0, 9
+  %tmp1 = shl i33 %b, 9
+  %b_24 = lshr i33 %tmp1, 9
+  %tmp2 = mul i33 %a_24, %b_24
+  %hi = lshr i33 %tmp2, 32
+  %trunc = trunc i33 %hi to i32
+  store i32 %trunc, i32 addrspace(1)* %out
+  ret void
+}
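
test_umulhi16_i32 relies on a 16x16-bit product fitting entirely in 32 bits,
so the 24-bit multiply already produces the exact product and the high 16
bits are just a shift; hence the expected v_mul_u32_u24 followed by
v_lshrrev_b32 by 16. A small reference sketch (the name umulhi16_ref is
illustrative):

#include <cstdint>

// 16x16 -> high 16 bits: the full product fits in 32 bits, so a 24-bit
// multiply computes it exactly and a shift extracts the high half.
static uint16_t umulhi16_ref(uint32_t A, uint32_t B) {
  uint32_t Prod = (A & 0xffffu) * (B & 0xffffu);  // exact 32-bit product
  return (uint16_t)(Prod >> 16);
}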



