[llvm] r311896 - [AMDGPU] computeKnownBitsForTargetNode for 24 bit mul

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 28 09:35:37 PDT 2017


Author: rampitec
Date: Mon Aug 28 09:35:37 2017
New Revision: 311896

URL: http://llvm.org/viewvc/llvm-project?rev=311896&view=rev
Log:
[AMDGPU] computeKnownBitsForTargetNode for 24 bit mul

Differential Revision: https://reviews.llvm.org/D37168

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/trunk/test/CodeGen/AMDGPU/lshl64-to-32.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=311896&r1=311895&r2=311896&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Mon Aug 28 09:35:37 2017
@@ -3841,7 +3841,6 @@ void AMDGPUTargetLowering::computeKnownB
 
   Known.resetAll(); // Don't know anything.
 
-  KnownBits Known2;
   unsigned Opc = Op.getOpcode();
 
   switch (Opc) {
@@ -3874,6 +3873,37 @@ void AMDGPUTargetLowering::computeKnownB
     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
     break;
   }
+  case AMDGPUISD::MUL_U24:
+  case AMDGPUISD::MUL_I24: {
+    KnownBits LHSKnown, RHSKnown;
+    DAG.computeKnownBits(Op.getOperand(0), LHSKnown);
+    DAG.computeKnownBits(Op.getOperand(1), RHSKnown);
+
+    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
+                      RHSKnown.countMinTrailingZeros();
+    Known.Zero.setLowBits(std::min(TrailZ, 32u));
+
+    unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u);
+    unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u);
+    unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
+    if (MaxValBits >= 32)
+      break;
+    bool Negative = false;
+    if (Opc == AMDGPUISD::MUL_I24) {
+      bool LHSNegative = !!(LHSKnown.One  & (1 << 23));
+      bool LHSPositive = !!(LHSKnown.Zero & (1 << 23));
+      bool RHSNegative = !!(RHSKnown.One  & (1 << 23));
+      bool RHSPositive = !!(RHSKnown.Zero & (1 << 23));
+      if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
+        break;
+      Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
+    }
+    if (Negative)
+      Known.One.setHighBits(32 - MaxValBits);
+    else
+      Known.Zero.setHighBits(32 - MaxValBits);
+    break;
+  }
   }
 }
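
For context, the new MUL_U24/MUL_I24 case above derives how many low and high
bits of a 24-bit multiply are known. A minimal standalone sketch of the
unsigned (MUL_U24) half of that reasoning, using plain uint32_t zero/one masks
in place of llvm::KnownBits (the struct and helper names here are illustrative
only, not LLVM API):

  #include <algorithm>
  #include <cstdint>

  struct Known32 {
    uint32_t Zero = 0; // bits known to be 0
    uint32_t One = 0;  // bits known to be 1 (never overlaps Zero)
  };

  // Number of low bits known to be zero.
  static unsigned minTrailingZeros(const Known32 &K) {
    return K.Zero == ~0u ? 32u : unsigned(__builtin_ctz(~K.Zero));
  }

  // Lower bound on redundant sign bits when the sign bit is known; the real
  // llvm::KnownBits handles more cases, this is only the part needed here.
  static unsigned minSignBits(const Known32 &K) {
    if (K.Zero >> 31) // known non-negative: count leading known-zero bits
      return K.Zero == ~0u ? 32u : unsigned(__builtin_clz(~K.Zero));
    if (K.One >> 31)  // known negative: count leading known-one bits
      return K.One == ~0u ? 32u : unsigned(__builtin_clz(~K.One));
    return 1;
  }

  // mul_u24: both operands are 24-bit quantities, so assume >= 8 sign bits.
  static Known32 knownBitsMulU24(const Known32 &LHS, const Known32 &RHS) {
    Known32 K;
    unsigned TrailZ =
        std::min(minTrailingZeros(LHS) + minTrailingZeros(RHS), 32u);
    K.Zero |= TrailZ >= 32 ? ~0u : (1u << TrailZ) - 1; // low product bits

    unsigned LHSValBits = 32 - std::max(minSignBits(LHS), 8u);
    unsigned RHSValBits = 32 - std::max(minSignBits(RHS), 8u);
    unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
    if (MaxValBits < 32)
      K.Zero |= ~0u << MaxValBits; // product fits in MaxValBits bits
    return K;
  }

With the mulu24_shl64 test added below, the first operand is x & 6 (3 value
bits) and the constant 7 also has 3 value bits, so the product needs at most 6
bits and its top 26 bits become known zero; that is what lets the following
64-bit shift be selected as v_lshlrev_b32.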
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/lshl64-to-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lshl64-to-32.ll?rev=311896&r1=311895&r2=311896&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/lshl64-to-32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/lshl64-to-32.ll Mon Aug 28 09:35:37 2017
@@ -1,8 +1,8 @@
-; RUN: llc -march=amdgcn < %s | FileCheck %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
-; CHECK-LABEL: {{^}}zext_shl64_to_32:
-; CHECK: s_lshl_b32
-; CHECK-NOT: s_lshl_b64
+; GCN-LABEL: {{^}}zext_shl64_to_32:
+; GCN: s_lshl_b32
+; GCN-NOT: s_lshl_b64
 define amdgpu_kernel void @zext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
   %and = and i32 %x, 1073741823
   %ext = zext i32 %and to i64
@@ -11,9 +11,9 @@ define amdgpu_kernel void @zext_shl64_to
   ret void
 }
 
-; CHECK-LABEL: {{^}}sext_shl64_to_32:
-; CHECK: s_lshl_b32
-; CHECK-NOT: s_lshl_b64
+; GCN-LABEL: {{^}}sext_shl64_to_32:
+; GCN: s_lshl_b32
+; GCN-NOT: s_lshl_b64
 define amdgpu_kernel void @sext_shl64_to_32(i64 addrspace(1)* nocapture %out, i32 %x) {
   %and = and i32 %x, 536870911
   %ext = sext i32 %and to i64
@@ -22,9 +22,9 @@ define amdgpu_kernel void @sext_shl64_to
   ret void
 }
 
-; CHECK-LABEL: {{^}}zext_shl64_overflow:
-; CHECK: s_lshl_b64
-; CHECK-NOT: s_lshl_b32
+; GCN-LABEL: {{^}}zext_shl64_overflow:
+; GCN: s_lshl_b64
+; GCN-NOT: s_lshl_b32
 define amdgpu_kernel void @zext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
   %and = and i32 %x, 2147483647
   %ext = zext i32 %and to i64
@@ -33,9 +33,9 @@ define amdgpu_kernel void @zext_shl64_ov
   ret void
 }
 
-; CHECK-LABEL: {{^}}sext_shl64_overflow:
-; CHECK: s_lshl_b64
-; CHECK-NOT: s_lshl_b32
+; GCN-LABEL: {{^}}sext_shl64_overflow:
+; GCN: s_lshl_b64
+; GCN-NOT: s_lshl_b32
 define amdgpu_kernel void @sext_shl64_overflow(i64 addrspace(1)* nocapture %out, i32 %x) {
   %and = and i32 %x, 2147483647
   %ext = sext i32 %and to i64
@@ -43,3 +43,37 @@ define amdgpu_kernel void @sext_shl64_ov
   store i64 %shl, i64 addrspace(1)* %out, align 4
   ret void
 }
+
+; GCN-LABEL: {{^}}mulu24_shl64:
+; GCN: v_mul_u32_u24_e32 [[M:v[0-9]+]], 7, v{{[0-9]+}}
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 2, [[M]]
+define amdgpu_kernel void @mulu24_shl64(i32 addrspace(1)* nocapture %arg) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp1 = and i32 %tmp, 6
+  %mulconv = mul nuw nsw i32 %tmp1, 7
+  %tmp2 = zext i32 %mulconv to i64
+  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp2
+  store i32 0, i32 addrspace(1)* %tmp3, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}muli24_shl64:
+; GCN: v_mul_i32_i24_e32 [[M:v[0-9]+]], -7, v{{[0-9]+}}
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, [[M]]
+define amdgpu_kernel void @muli24_shl64(i64 addrspace(1)* nocapture %arg, i32 addrspace(1)* nocapture readonly %arg1) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp2 = sext i32 %tmp to i64
+  %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp2
+  %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4
+  %tmp5 = or i32 %tmp4, -8388608
+  %tmp6 = mul nsw i32 %tmp5, -7
+  %tmp7 = zext i32 %tmp6 to i64
+  %tmp8 = shl nuw nsw i64 %tmp7, 3
+  %tmp9 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp2
+  store i64 %tmp8, i64 addrspace(1)* %tmp9, align 8
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
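
For the muli24_shl64 test, the or with -8388608 (0xff800000) pins bit 23 of
the loaded operand to 1, and the constant -7 also has bit 23 set, so under the
new MUL_I24 handling both operands are known negative and the product is known
non-negative. Roughly, following the patch's arithmetic:

  LHS: bits 23..31 known one -> 9 sign bits  -> 23 value bits
  RHS: constant -7           -> 29 sign bits ->  3 value bits
  product: at most 26 value bits, so the top 6 bits are known zero

which keeps the zext-and-shift by 3 inside the low 32 bits and allows the
v_lshlrev_b32 the checks above expect.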



