[llvm] 7cdb1df - [AMDGPU] Divergence driven selection for fused bitlogic

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 18 01:44:37 PDT 2021


Author: Stanislav Mekhanoshin
Date: 2021-10-18T01:44:25-07:00
New Revision: 7cdb1df8c70425b30905418636f9008cf8d3a844

URL: https://github.com/llvm/llvm-project/commit/7cdb1df8c70425b30905418636f9008cf8d3a844
DIFF: https://github.com/llvm/llvm-project/commit/7cdb1df8c70425b30905418636f9008cf8d3a844.diff

LOG: [AMDGPU] Divergence driven selection for fused bitlogic

This change adds divergence predicates for the fused logical operations.
The problem with selecting a scalar fused opcode such as S_NOR_B32 for
a divergent node is that it has no VALU counterpart and will be split
by moveToVALU. At the same time it blocks selection of a better opcode
on the VALU side (such as V_OR3_B32) which has no counterpart on the
SALU side.
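
For illustration, a minimal IR sketch of the divergent case, along the
lines of the divergent_or3_b32 test updated below (value names here are
made up):

  ; %v is a divergent load result, so the bitlogic below is divergent
  %a = extractelement <3 x i32> %v, i32 0
  %b = extractelement <3 x i32> %v, i32 1
  %c = extractelement <3 x i32> %v, i32 2
  %or1 = or i32 %a, %b
  %or2 = or i32 %or1, %c
  %not = xor i32 %or2, -1     ; not (or (or a, b), c)

Previously the not(or) here could still match the scalar S_NOR_B32
pattern and had to be split by moveToVALU; with the divergence
predicate it stays on the VALU side, where the two ors now fold into a
single v_or3_b32 followed by v_not_b32.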

XNOR opcodes are left as is and are still selected as scalar to take
advantage of SIInstrInfo::lowerScalarXnor(), which can commute the
operations to keep one of the two opcodes on the SALU when possible.
See the xnor.ll test for this.
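
As a sketch of that commutation, assuming %u is uniform and %d is
divergent: xnor(%u, %d) == not(xor(%u, %d)) == xor(not(%u), %d), so an
s_not_b32 of the uniform operand can stay on the SALU and only a single
v_xor_b32 is needed for the divergent part:

  ; hypothetical inputs: %u uniform, %d divergent
  %x = xor i32 %u, %d
  %r = xor i32 %x, -1     ; xnor(%u, %d) == xor(not(%u), %d)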

Differential Revision: https://reviews.llvm.org/D111907

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/lib/Target/AMDGPU/VOP3Instructions.td
    llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index ff7c0a87a7f7f..5f63970c66973 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -558,19 +558,19 @@ def S_XNOR_B64 : SOP2_64 <"s_xnor_b64",
 >;
 
 def S_NAND_B32 : SOP2_32 <"s_nand_b32",
-  [(set i32:$sdst, (not (and_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (and_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NAND_B64 : SOP2_64 <"s_nand_b64",
-  [(set i64:$sdst, (not (and_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (and_oneuse i64:$src0, i64:$src1)))]
 >;
 
 def S_NOR_B32 : SOP2_32 <"s_nor_b32",
-  [(set i32:$sdst, (not (or_oneuse i32:$src0, i32:$src1)))]
+  [(set i32:$sdst, (UniformUnaryFrag<not> (or_oneuse i32:$src0, i32:$src1)))]
 >;
 
 def S_NOR_B64 : SOP2_64 <"s_nor_b64",
-  [(set i64:$sdst, (not (or_oneuse i64:$src0, i64:$src1)))]
+  [(set i64:$sdst, (UniformUnaryFrag<not> (or_oneuse i64:$src0, i64:$src1)))]
 >;
 } // End isCommutable = 1
 

diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 4b216a4ec1572..8c6d1884c5c5e 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -667,6 +667,14 @@ def : ThreeOp_i32_Pats<xor, add, V_XAD_U32_e64>;
 def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
 def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;
 
+def : GCNPat<(getDivergentFrag<or>.ret (or_oneuse i64:$src0, i64:$src1), i64:$src2),
+             (REG_SEQUENCE VReg_64,
+               (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub0)),
+                              (i32 (EXTRACT_SUBREG $src1, sub0)),
+                              (i32 (EXTRACT_SUBREG $src2, sub0))), sub0,
+               (V_OR3_B32_e64 (i32 (EXTRACT_SUBREG $src0, sub1)),
+                              (i32 (EXTRACT_SUBREG $src1, sub1)),
+                              (i32 (EXTRACT_SUBREG $src2, sub1))), sub1)>;
 
 // FIXME: Probably should hardcode clamp bit in pseudo and avoid this.
 class OpSelBinOpClampPat<SDPatternOperator node,

diff --git a/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
index 654ed0faa11d9..933f73c0fe5d4 100644
--- a/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
+++ b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
@@ -9,8 +9,7 @@ define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or3_b32 v0, v1, v0, v2
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
 ; GCN-NEXT:    s_endpgm
@@ -39,10 +38,8 @@ define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v4
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_or3_b32 v1, v3, v1, v5
+; GCN-NEXT:    v_or3_b32 v0, v2, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
@@ -103,8 +100,8 @@ define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
 ; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
-; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
 ; GCN-NEXT:    v_not_b32_e32 v0, v0
 ; GCN-NEXT:    v_not_b32_e32 v1, v1
 ; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]


        

