[llvm] 87b2af8 - AMDGPU/GlobalISel: Enable s_{and|or}n2_{b32|b64} patterns

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 6 15:00:45 PDT 2020


Author: Matt Arsenault
Date: 2020-08-06T18:00:38-04:00
New Revision: 87b2af814078d319dd823eb9c0a279a96ae5f349

URL: https://github.com/llvm/llvm-project/commit/87b2af814078d319dd823eb9c0a279a96ae5f349
DIFF: https://github.com/llvm/llvm-project/commit/87b2af814078d319dd823eb9c0a279a96ae5f349.diff

LOG: AMDGPU/GlobalISel: Enable s_{and|or}n2_{b32|b64} patterns

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 92cb2807611a..1bff2fe76c44 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -408,8 +408,14 @@ class SOP2_64_32_32 <string opName, list<dag> pattern=[]> : SOP2_Pseudo <
 class UniformUnaryFrag<SDPatternOperator Op> : PatFrag <
   (ops node:$src0),
   (Op $src0),
-  [{ return !N->isDivergent(); }]
->;
+  [{ return !N->isDivergent(); }]> {
+  // This check is unnecessary as it's captured by the result register
+  // bank constraint.
+  //
+  // FIXME: Should add a way for the emitter to recognize this is a
+  // trivially true predicate to eliminate the check.
+  let GISelPredicateCode = [{return true;}];
+}
 
 class UniformBinFrag<SDPatternOperator Op> : PatFrag <
   (ops node:$src0, node:$src1),

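For context on the new GISelPredicateCode line: these PatFrags only feed scalar (SALU) selection patterns, so under GlobalISel the SGPR register-bank constraint on the result already guarantees the node is uniform, making the SelectionDAG-style !N->isDivergent() check redundant there. An illustrative sketch of the kind of s_andn2 pattern this change lets the GlobalISel emitter import (instruction and register-class names follow existing AMDGPU conventions, but this is not the exact pattern text from SOPInstructions.td):

// Illustrative sketch only: fold a uniform (and x, (not y)) into the
// scalar S_ANDN2_B32. The SGPR destination constraint already enforces
// uniformity, which is why UniformUnaryFrag/UniformBinFrag can report
// GISelPredicateCode as trivially true.
def : GCNPat <
  (i32 (UniformBinFrag<and> i32:$src0, (UniformUnaryFrag<not> i32:$src1))),
  (S_ANDN2_B32 SReg_32:$src0, SReg_32:$src1)
>;
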
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
index 29bf1b028076..1dbcd56e9237 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
@@ -5,8 +5,7 @@
 define amdgpu_ps i32 @s_andn2_i32(i32 inreg %src0, i32 inreg %src1) {
 ; GCN-LABEL: s_andn2_i32:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s0, s3
-; GCN-NEXT:    s_and_b32 s0, s2, s0
+; GCN-NEXT:    s_andn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %and = and i32 %src0, %not.src1
@@ -16,8 +15,7 @@ define amdgpu_ps i32 @s_andn2_i32(i32 inreg %src0, i32 inreg %src1) {
 define amdgpu_ps i32 @s_andn2_i32_commute(i32 inreg %src0, i32 inreg %src1) {
 ; GCN-LABEL: s_andn2_i32_commute:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s0, s3
-; GCN-NEXT:    s_and_b32 s0, s0, s2
+; GCN-NEXT:    s_andn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %and = and i32 %not.src1, %src0
@@ -28,7 +26,7 @@ define amdgpu_ps { i32, i32 } @s_andn2_i32_multi_use(i32 inreg %src0, i32 inreg
 ; GCN-LABEL: s_andn2_i32_multi_use:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_not_b32 s1, s3
-; GCN-NEXT:    s_and_b32 s0, s2, s1
+; GCN-NEXT:    s_andn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %and = and i32 %src0, %not.src1
@@ -40,9 +38,8 @@ define amdgpu_ps { i32, i32 } @s_andn2_i32_multi_use(i32 inreg %src0, i32 inreg
 define amdgpu_ps { i32, i32 } @s_andn2_i32_multi_foldable_use(i32 inreg %src0, i32 inreg %src1, i32 inreg %src2) {
 ; GCN-LABEL: s_andn2_i32_multi_foldable_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s1, s4
-; GCN-NEXT:    s_and_b32 s0, s2, s1
-; GCN-NEXT:    s_and_b32 s1, s3, s1
+; GCN-NEXT:    s_andn2_b32 s0, s2, s4
+; GCN-NEXT:    s_andn2_b32 s1, s3, s4
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src2 = xor i32 %src2, -1
   %and0 = and i32 %src0, %not.src2
@@ -91,8 +88,7 @@ define amdgpu_ps float @v_andn2_i32_vs(i32 %src0, i32 inreg %src1) {
 define amdgpu_ps i64 @s_andn2_i64(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_andn2_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[0:1], s[4:5]
-; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[4:5]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %and = and i64 %src0, %not.src1
@@ -102,8 +98,7 @@ define amdgpu_ps i64 @s_andn2_i64(i64 inreg %src0, i64 inreg %src1) {
 define amdgpu_ps i64 @s_andn2_i64_commute(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_andn2_i64_commute:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[0:1], s[4:5]
-; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[4:5]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %and = and i64 %not.src1, %src0
@@ -113,9 +108,8 @@ define amdgpu_ps i64 @s_andn2_i64_commute(i64 inreg %src0, i64 inreg %src1) {
 define amdgpu_ps { i64, i64 } @s_andn2_i64_multi_foldable_use(i64 inreg %src0, i64 inreg %src1, i64 inreg %src2) {
 ; GCN-LABEL: s_andn2_i64_multi_foldable_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[6:7], s[6:7]
-; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[6:7]
-; GCN-NEXT:    s_and_b64 s[2:3], s[4:5], s[6:7]
+; GCN-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[6:7]
+; GCN-NEXT:    s_andn2_b64 s[2:3], s[4:5], s[6:7]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src2 = xor i64 %src2, -1
   %and0 = and i64 %src0, %not.src2
@@ -128,10 +122,10 @@ define amdgpu_ps { i64, i64 } @s_andn2_i64_multi_foldable_use(i64 inreg %src0, i
 define amdgpu_ps { i64, i64 } @s_andn2_i64_multi_use(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_andn2_i64_multi_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[4:5], s[4:5]
-; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[4:5]
-; GCN-NEXT:    s_mov_b32 s2, s4
-; GCN-NEXT:    s_mov_b32 s3, s5
+; GCN-NEXT:    s_not_b64 s[6:7], s[4:5]
+; GCN-NEXT:    s_andn2_b64 s[0:1], s[2:3], s[4:5]
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %and = and i64 %src0, %not.src1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
index dd9758d621e8..3e396b56c52e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/orn2.ll
@@ -5,8 +5,7 @@
 define amdgpu_ps i32 @s_orn2_i32(i32 inreg %src0, i32 inreg %src1) {
 ; GCN-LABEL: s_orn2_i32:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s0, s3
-; GCN-NEXT:    s_or_b32 s0, s2, s0
+; GCN-NEXT:    s_orn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %or = or i32 %src0, %not.src1
@@ -16,8 +15,7 @@ define amdgpu_ps i32 @s_orn2_i32(i32 inreg %src0, i32 inreg %src1) {
 define amdgpu_ps i32 @s_orn2_i32_commute(i32 inreg %src0, i32 inreg %src1) {
 ; GCN-LABEL: s_orn2_i32_commute:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s0, s3
-; GCN-NEXT:    s_or_b32 s0, s0, s2
+; GCN-NEXT:    s_orn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %or = or i32 %not.src1, %src0
@@ -28,7 +26,7 @@ define amdgpu_ps { i32, i32 } @s_orn2_i32_multi_use(i32 inreg %src0, i32 inreg %
 ; GCN-LABEL: s_orn2_i32_multi_use:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_not_b32 s1, s3
-; GCN-NEXT:    s_or_b32 s0, s2, s1
+; GCN-NEXT:    s_orn2_b32 s0, s2, s3
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i32 %src1, -1
   %or = or i32 %src0, %not.src1
@@ -40,9 +38,8 @@ define amdgpu_ps { i32, i32 } @s_orn2_i32_multi_use(i32 inreg %src0, i32 inreg %
 define amdgpu_ps { i32, i32 } @s_orn2_i32_multi_foldable_use(i32 inreg %src0, i32 inreg %src1, i32 inreg %src2) {
 ; GCN-LABEL: s_orn2_i32_multi_foldable_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b32 s1, s4
-; GCN-NEXT:    s_or_b32 s0, s2, s1
-; GCN-NEXT:    s_or_b32 s1, s3, s1
+; GCN-NEXT:    s_orn2_b32 s0, s2, s4
+; GCN-NEXT:    s_orn2_b32 s1, s3, s4
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src2 = xor i32 %src2, -1
   %or0 = or i32 %src0, %not.src2
@@ -91,8 +88,7 @@ define amdgpu_ps float @v_orn2_i32_vs(i32 %src0, i32 inreg %src1) {
 define amdgpu_ps i64 @s_orn2_i64(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_orn2_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[0:1], s[4:5]
-; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_orn2_b64 s[0:1], s[2:3], s[4:5]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %or = or i64 %src0, %not.src1
@@ -102,8 +98,7 @@ define amdgpu_ps i64 @s_orn2_i64(i64 inreg %src0, i64 inreg %src1) {
 define amdgpu_ps i64 @s_orn2_i64_commute(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_orn2_i64_commute:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[0:1], s[4:5]
-; GCN-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    s_orn2_b64 s[0:1], s[2:3], s[4:5]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %or = or i64 %not.src1, %src0
@@ -113,9 +108,8 @@ define amdgpu_ps i64 @s_orn2_i64_commute(i64 inreg %src0, i64 inreg %src1) {
 define amdgpu_ps { i64, i64 } @s_orn2_i64_multi_foldable_use(i64 inreg %src0, i64 inreg %src1, i64 inreg %src2) {
 ; GCN-LABEL: s_orn2_i64_multi_foldable_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[6:7], s[6:7]
-; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[6:7]
-; GCN-NEXT:    s_or_b64 s[2:3], s[4:5], s[6:7]
+; GCN-NEXT:    s_orn2_b64 s[0:1], s[2:3], s[6:7]
+; GCN-NEXT:    s_orn2_b64 s[2:3], s[4:5], s[6:7]
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src2 = xor i64 %src2, -1
   %or0 = or i64 %src0, %not.src2
@@ -128,10 +122,10 @@ define amdgpu_ps { i64, i64 } @s_orn2_i64_multi_foldable_use(i64 inreg %src0, i6
 define amdgpu_ps { i64, i64 } @s_orn2_i64_multi_use(i64 inreg %src0, i64 inreg %src1) {
 ; GCN-LABEL: s_orn2_i64_multi_use:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_not_b64 s[4:5], s[4:5]
-; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[4:5]
-; GCN-NEXT:    s_mov_b32 s2, s4
-; GCN-NEXT:    s_mov_b32 s3, s5
+; GCN-NEXT:    s_not_b64 s[6:7], s[4:5]
+; GCN-NEXT:    s_orn2_b64 s[0:1], s[2:3], s[4:5]
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
 ; GCN-NEXT:    ; return to shader part epilog
   %not.src1 = xor i64 %src1, -1
   %or = or i64 %src0, %not.src1

