[llvm] r258096 - AMDGPU: Reduce 64-bit SRAs

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 18 14:09:05 PST 2016


Author: arsenm
Date: Mon Jan 18 16:09:04 2016
New Revision: 258096

URL: http://llvm.org/viewvc/llvm-project?rev=258096&view=rev
Log:
AMDGPU: Reduce 64-bit SRAs
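
Add a target DAG combine for ISD::SRA that rewrites two common 64-bit
arithmetic right shifts into 32-bit operations on the high half of the
value:

  (sra i64:x, 32) -> build_pair hi_32(x), (sra hi_32(x), 31)
  (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)

This lets the scalar and vector selections use s_ashr_i32 and
v_ashrrev_i32_e32 instead of 64-bit shift instructions, as the updated
tests below check.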

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
    llvm/trunk/test/CodeGen/AMDGPU/shift-i64-opts.ll
    llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/sra.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=258096&r1=258095&r2=258096&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Mon Jan 18 16:09:04 2016
@@ -378,6 +378,7 @@ AMDGPUTargetLowering::AMDGPUTargetLoweri
 
   setTargetDAGCombine(ISD::AND);
   setTargetDAGCombine(ISD::SHL);
+  setTargetDAGCombine(ISD::SRA);
   setTargetDAGCombine(ISD::SRL);
   setTargetDAGCombine(ISD::MUL);
   setTargetDAGCombine(ISD::SELECT);
@@ -1193,6 +1194,22 @@ AMDGPUTargetLowering::split64BitValue(SD
   return std::make_pair(Lo, Hi);
 }
 
+SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc SL(Op);
+
+  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
+  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
+}
+
+SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc SL(Op);
+
+  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
+  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
+}
+
 SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                   SelectionDAG &DAG) const {
   LoadSDNode *Load = cast<LoadSDNode>(Op);
@@ -2626,6 +2643,43 @@ SDValue AMDGPUTargetLowering::performShl
   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
 }
 
+SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
+                                                DAGCombinerInfo &DCI) const {
+  if (N->getValueType(0) != MVT::i64)
+    return SDValue();
+
+  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!RHS)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc SL(N);
+  unsigned RHSVal = RHS->getZExtValue();
+
+  // (sra i64:x, 32) -> build_pair hi_32(x), (sra hi_32(x), 31)
+  if (RHSVal == 32) {
+    SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
+    SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
+                                   DAG.getConstant(31, SL, MVT::i32));
+
+    SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+                                   Hi, NewShift);
+    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
+  }
+
+  // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
+  if (RHSVal == 63) {
+    SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
+    SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
+                                   DAG.getConstant(31, SL, MVT::i32));
+    SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+                                   NewShift, NewShift);
+    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
+  }
+
+  return SDValue();
+}
+
 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   if (N->getValueType(0) != MVT::i64)
@@ -2804,6 +2858,12 @@ SDValue AMDGPUTargetLowering::PerformDAG
 
     return performSrlCombine(N, DCI);
   }
+  case ISD::SRA: {
+    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
+      break;
+
+    return performSraCombine(N, DCI);
+  }
   case ISD::AND: {
     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
       break;

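A standalone sanity check (a sketch, not part of the patch) of the bit-level
identities the two matched cases rely on. It assumes arithmetic right shift
and two's-complement wrapping conversions for signed integers, which is what
common hosts do (and what C++20 guarantees). Pack64 models the
build_vector + bitcast that reassembles the i64 from two 32-bit words.

#include <cassert>
#include <cstdint>

// Reassemble an i64 from its low and high 32-bit words, mirroring the
// (bitcast (build_vector Lo, Hi)) the combine emits.
static int64_t Pack64(uint32_t Lo, uint32_t Hi) {
  return int64_t((uint64_t(Hi) << 32) | Lo);
}

int main() {
  const int64_t Tests[] = {0, 1, -1, INT64_MIN, INT64_MAX,
                           int64_t(0x123456789abcdef0)};
  for (int64_t X : Tests) {
    uint32_t Hi = uint32_t(uint64_t(X) >> 32);    // getHiHalf64(x)
    uint32_t Sign = uint32_t(int32_t(Hi) >> 31);  // sra hi_32(x), 31

    // (sra i64:x, 32): low word is hi_32(x), high word is its sign.
    assert(Pack64(Hi, Sign) == (X >> 32));

    // (sra i64:x, 63): both words are the sign of hi_32(x).
    assert(Pack64(Sign, Sign) == (X >> 63));
  }
  return 0;
}
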
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?rev=258096&r1=258095&r2=258096&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h Mon Jan 18 16:09:04 2016
@@ -87,6 +87,8 @@ protected:
   /// Return 64-bit value Op as two 32-bit integers.
   std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                               SelectionDAG &DAG) const;
+  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
+  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
 
   /// \brief Split a vector load into a scalar load of each component.
   SDValue ScalarizeVectorLoad(SDValue Op, SelectionDAG &DAG) const;

Modified: llvm/trunk/test/CodeGen/AMDGPU/shift-i64-opts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shift-i64-opts.ll?rev=258096&r1=258095&r2=258096&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/shift-i64-opts.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/shift-i64-opts.ll Mon Jan 18 16:09:04 2016
@@ -105,10 +105,18 @@ define void @shl_i64_const_63(i64 addrsp
 
 ; ashr (i64 x), 63 => (ashr lo(x), 31), lo(x)
 
-; GCN-LABEL: {{^}}ashr_i64_const_gt_32:
-define void @ashr_i64_const_gt_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+; GCN-LABEL: {{^}}ashr_i64_const_32:
+define void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %val = load i64, i64 addrspace(1)* %in
-  %shl = ashr i64 %val, 35
+  %shl = ashr i64 %val, 32
+  store i64 %shl, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}ashr_i64_const_63:
+define void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+  %val = load i64, i64 addrspace(1)* %in
+  %shl = ashr i64 %val, 63
   store i64 %shl, i64 addrspace(1)* %out
   ret void
 }

Modified: llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.i64.ll?rev=258096&r1=258095&r2=258096&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.i64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.i64.ll Mon Jan 18 16:09:04 2016
@@ -13,8 +13,7 @@ define void @s_sint_to_fp_i64_to_f32(flo
 ; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f32:
 ; GCN: {{buffer|flat}}_load_dwordx2
 
-; SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, 63
-; VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\]}}, 63, {{v\[[0-9]+:[0-9]+\]}}
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
 ; GCN: v_xor_b32
 
 ; GCN: v_ffbh_u32

Modified: llvm/trunk/test/CodeGen/AMDGPU/sra.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sra.ll?rev=258096&r1=258095&r2=258096&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sra.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sra.ll Mon Jan 18 16:09:04 2016
@@ -201,10 +201,10 @@ define void @ashr_v4i64(<4 x i64> addrsp
 }
 
 ; GCN-LABEL: {{^}}s_ashr_32_i64:
-; GCN: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
-; GCN: s_ashr_i64 [[SHIFT:s\[[0-9]+:[0-9]+\]]], [[VAL]], 32
-; GCN: s_add_u32
-; GCN: s_addc_u32
+; GCN: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
+; GCN: s_add_u32 s{{[0-9]+}}, s[[HI]], s{{[0-9]+}}
+; GCN: s_addc_u32 s{{[0-9]+}}, s[[SHIFT]], s{{[0-9]+}}
 define void @s_ashr_32_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
   %result = ashr i64 %a, 32
   %add = add i64 %result, %b
@@ -213,10 +213,10 @@ define void @s_ashr_32_i64(i64 addrspace
 }
 
 ; GCN-LABEL: {{^}}v_ashr_32_i64:
-; GCN: {{buffer|flat}}_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; SI: v_ashr_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], [[VAL]], 32
-; VI: v_ashrrev_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], 32, [[VAL]]
-; GCN: {{buffer|flat}}_store_dwordx2 [[SHIFT]]
+; SI: buffer_load_dword v[[HI:[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; VI: flat_load_dword v[[HI:[0-9]+]]
+; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
+; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[HI]]:[[SHIFT]]{{\]}}
 define void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -228,9 +228,11 @@ define void @v_ashr_32_i64(i64 addrspace
 }
 
 ; GCN-LABEL: {{^}}s_ashr_63_i64:
-; GCN-DAG: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
-; GCN: s_ashr_i64 [[SHIFT:s\[[0-9]+:[0-9]+\]]], [[VAL]], 63
-; GCN: s_add_u32
+; GCN-DAG: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
+; GCN: s_mov_b32 s[[COPYSHIFT:[0-9]+]], s[[SHIFT]]
+; GCN: s_add_u32 {{s[0-9]+}}, s[[HI]], {{s[0-9]+}}
+; GCN: s_addc_u32 {{s[0-9]+}}, s[[COPYSHIFT]], {{s[0-9]+}}
 define void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
   %result = ashr i64 %a, 63
   %add = add i64 %result, %b
@@ -239,10 +241,11 @@ define void @s_ashr_63_i64(i64 addrspace
 }
 
 ; GCN-LABEL: {{^}}v_ashr_63_i64:
-; GCN-DAG: {{buffer|flat}}_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; SI: v_ashr_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], [[VAL]], 63
-; VI: v_ashrrev_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], 63, [[VAL]]
-; GCN: {{buffer|flat}}_store_dwordx2 [[SHIFT]]
+; SI: buffer_load_dword v[[HI:[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; VI: flat_load_dword v[[HI:[0-9]+]]
+; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
+; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[SHIFT]]
+; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[SHIFT]]:[[COPY]]{{\]}}
 define void @v_ashr_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.r600.read.tidig.x() #0
   %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid


More information about the llvm-commits mailing list