[llvm] 965f3ca - [GISel] Fold bitreverse(shl/srl(bitreverse(x), y)) -> srl/shl(x,y) (#91355)

via llvm-commits llvm-commits@lists.llvm.org
Wed May 8 13:59:03 PDT 2024


Author: Simon Pilgrim
Date: 2024-05-08T21:58:59+01:00
New Revision: 965f3ca3dc5464892e283e176bf058ae04d8b654

URL: https://github.com/llvm/llvm-project/commit/965f3ca3dc5464892e283e176bf058ae04d8b654
DIFF: https://github.com/llvm/llvm-project/commit/965f3ca3dc5464892e283e176bf058ae04d8b654.diff

LOG: [GISel] Fold bitreverse(shl/srl(bitreverse(x),y)) -> srl/shl(x,y) (#91355)

Sibling patch to #89897
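The fold relies on bit-reversal swapping the roles of the two logical shifts: bit i of bitreverse(x) is bit (N-1-i) of x, so bitreverse(lshr(bitreverse(x), y)) is equivalent to shl(x, y), and bitreverse(shl(bitreverse(x), y)) to lshr(x, y). As a rough illustration (not taken from the patch; the function name and shift amount below are made up), the kind of IR the new combines rewrite looks like:

  declare i32 @llvm.bitreverse.i32(i32)

  define i32 @example(i32 %a) {
    ; %3 computes bitreverse(lshr(bitreverse(%a), 5)), which equals
    ; shl(%a, 5); the new bitreverse_lshr combine collapses the whole
    ; chain into a single G_SHL during GlobalISel.
    %1 = call i32 @llvm.bitreverse.i32(i32 %a)
    %2 = lshr i32 %1, 5
    %3 = call i32 @llvm.bitreverse.i32(i32 %2)
    ret i32 %3
  }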

Added: 
    

Modified: 
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index d0e1253903475..98d266c8c0b4f 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -325,6 +325,28 @@ def reduce_shl_of_extend : GICombineRule<
          [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
   (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
 
+// Combine bitreverse(shl (bitreverse x), y) -> (lshr x, y)
+def bitreverse_shl : GICombineRule<
+  (defs root:$d),
+  (match (G_BITREVERSE $rev, $val),
+         (G_SHL $src, $rev, $amt):$mi,
+         (G_BITREVERSE $d, $src),
+         [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR,
+                                                   {MRI.getType(${val}.getReg()),
+                                                    MRI.getType(${amt}.getReg())}}); }]),
+  (apply (G_LSHR $d, $val, $amt))>;
+
+// Combine bitreverse(lshr (bitreverse x), y) -> (shl x, y)
+def bitreverse_lshr : GICombineRule<
+  (defs root:$d),
+  (match (G_BITREVERSE $rev, $val),
+         (G_LSHR $src, $rev, $amt):$mi,
+         (G_BITREVERSE $d, $src),
+         [{ return Helper.isLegalOrBeforeLegalizer({TargetOpcode::G_SHL,
+                                                   {MRI.getType(${val}.getReg()),
+                                                    MRI.getType(${amt}.getReg())}}); }]),
+  (apply (G_SHL $d, $val, $amt))>;
+
 // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
 // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
 def commute_shift : GICombineRule<
@@ -1645,6 +1667,8 @@ def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
 
 def phi_combines : GICombineGroup<[extend_through_phis]>;
 
+def bitreverse_shift : GICombineGroup<[bitreverse_shl, bitreverse_lshr]>;
+
 def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                       match_selects]>;
 
@@ -1674,7 +1698,7 @@ def all_combines : GICombineGroup<[trivial_combines, vector_ops_combines,
     unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
     const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
     shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
-    div_rem_to_divrem, funnel_shift_combines, commute_shift,
+    div_rem_to_divrem, funnel_shift_combines, bitreverse_shift, commute_shift,
     form_bitfield_extract, constant_fold_binops, constant_fold_fma,
     constant_fold_cast_op, fabs_fneg_fold,
     intdiv_combines, mulh_combines, redundant_neg_operands,

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
index 3ce94e2c40a9a..b9fbe2379a42d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefixes=SDAG
-; RUN: llc < %s -mtriple=aarch64-unknown-unknown -global-isel | FileCheck %s --check-prefixes=GISEL
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown -global-isel | FileCheck %s
 
 ; These tests can be optimised
 ;       fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x)
@@ -12,19 +12,10 @@ declare i32 @llvm.bitreverse.i32(i32)
 declare i64 @llvm.bitreverse.i64(i64)
 
 define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_srli_bitreverse_i8:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsl w0, w0, #3
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_srli_bitreverse_i8:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsr w8, w8, #24
-; GISEL-NEXT:    lsr w8, w8, #3
-; GISEL-NEXT:    rbit w8, w8
-; GISEL-NEXT:    lsr w0, w8, #24
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_srli_bitreverse_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsl w0, w0, #3
+; CHECK-NEXT:    ret
   %1 = call i8 @llvm.bitreverse.i8(i8 %a)
   %2 = lshr i8 %1, 3
   %3 = call i8 @llvm.bitreverse.i8(i8 %2)
@@ -32,19 +23,10 @@ define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind {
 }
 
 define i16 @test_bitreverse_srli_bitreverse_i16(i16 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_srli_bitreverse_i16:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsl w0, w0, #7
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_srli_bitreverse_i16:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsr w8, w8, #16
-; GISEL-NEXT:    lsr w8, w8, #7
-; GISEL-NEXT:    rbit w8, w8
-; GISEL-NEXT:    lsr w0, w8, #16
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_srli_bitreverse_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsl w0, w0, #7
+; CHECK-NEXT:    ret
   %1 = call i16 @llvm.bitreverse.i16(i16 %a)
   %2 = lshr i16 %1, 7
   %3 = call i16 @llvm.bitreverse.i16(i16 %2)
@@ -52,17 +34,10 @@ define i16 @test_bitreverse_srli_bitreverse_i16(i16 %a) nounwind {
 }
 
 define i32 @test_bitreverse_srli_bitreverse_i32(i32 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_srli_bitreverse_i32:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsl w0, w0, #15
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_srli_bitreverse_i32:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsr w8, w8, #15
-; GISEL-NEXT:    rbit w0, w8
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_srli_bitreverse_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsl w0, w0, #15
+; CHECK-NEXT:    ret
   %1 = call i32 @llvm.bitreverse.i32(i32 %a)
   %2 = lshr i32 %1, 15
   %3 = call i32 @llvm.bitreverse.i32(i32 %2)
@@ -70,17 +45,10 @@ define i32 @test_bitreverse_srli_bitreverse_i32(i32 %a) nounwind {
 }
 
 define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_srli_bitreverse_i64:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsl x0, x0, #33
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_srli_bitreverse_i64:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit x8, x0
-; GISEL-NEXT:    lsr x8, x8, #33
-; GISEL-NEXT:    rbit x0, x8
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_srli_bitreverse_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsl x0, x0, #33
+; CHECK-NEXT:    ret
   %1 = call i64 @llvm.bitreverse.i64(i64 %a)
   %2 = lshr i64 %1, 33
   %3 = call i64 @llvm.bitreverse.i64(i64 %2)
@@ -88,19 +56,10 @@ define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
 }
 
 define i8 @test_bitreverse_shli_bitreverse_i8(i8 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_shli_bitreverse_i8:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    ubfx w0, w0, #3, #5
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_shli_bitreverse_i8:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsr w8, w8, #24
-; GISEL-NEXT:    lsl w8, w8, #3
-; GISEL-NEXT:    rbit w8, w8
-; GISEL-NEXT:    lsr w0, w8, #24
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_shli_bitreverse_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx w0, w0, #3, #5
+; CHECK-NEXT:    ret
   %1 = call i8 @llvm.bitreverse.i8(i8 %a)
   %2 = shl i8 %1, 3
   %3 = call i8 @llvm.bitreverse.i8(i8 %2)
@@ -108,19 +67,10 @@ define i8 @test_bitreverse_shli_bitreverse_i8(i8 %a) nounwind {
 }
 
 define i16 @test_bitreverse_shli_bitreverse_i16(i16 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_shli_bitreverse_i16:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    ubfx w0, w0, #7, #9
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_shli_bitreverse_i16:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsr w8, w8, #16
-; GISEL-NEXT:    lsl w8, w8, #7
-; GISEL-NEXT:    rbit w8, w8
-; GISEL-NEXT:    lsr w0, w8, #16
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_shli_bitreverse_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ubfx w0, w0, #7, #9
+; CHECK-NEXT:    ret
   %1 = call i16 @llvm.bitreverse.i16(i16 %a)
   %2 = shl i16 %1, 7
   %3 = call i16 @llvm.bitreverse.i16(i16 %2)
@@ -128,17 +78,10 @@ define i16 @test_bitreverse_shli_bitreverse_i16(i16 %a) nounwind {
 }
 
 define i32 @test_bitreverse_shli_bitreverse_i32(i32 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_shli_bitreverse_i32:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsr w0, w0, #15
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_shli_bitreverse_i32:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit w8, w0
-; GISEL-NEXT:    lsl w8, w8, #15
-; GISEL-NEXT:    rbit w0, w8
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_shli_bitreverse_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr w0, w0, #15
+; CHECK-NEXT:    ret
   %1 = call i32 @llvm.bitreverse.i32(i32 %a)
   %2 = shl i32 %1, 15
   %3 = call i32 @llvm.bitreverse.i32(i32 %2)
@@ -146,17 +89,10 @@ define i32 @test_bitreverse_shli_bitreverse_i32(i32 %a) nounwind {
 }
 
 define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
-; SDAG-LABEL: test_bitreverse_shli_bitreverse_i64:
-; SDAG:       // %bb.0:
-; SDAG-NEXT:    lsr x0, x0, #33
-; SDAG-NEXT:    ret
-;
-; GISEL-LABEL: test_bitreverse_shli_bitreverse_i64:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    rbit x8, x0
-; GISEL-NEXT:    lsl x8, x8, #33
-; GISEL-NEXT:    rbit x0, x8
-; GISEL-NEXT:    ret
+; CHECK-LABEL: test_bitreverse_shli_bitreverse_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    lsr x0, x0, #33
+; CHECK-NEXT:    ret
   %1 = call i64 @llvm.bitreverse.i64(i64 %a)
   %2 = shl i64 %1, 33
   %3 = call i64 @llvm.bitreverse.i64(i64 %2)


More information about the llvm-commits mailing list