[llvm] 41dd07b - [AArch64] Add test coverage for bitreverse(logicalshift(bitreverse(x),y)) -> logicalshift(x,y) fold
Author: Simon Pilgrim
Date: 2024-05-07T15:55:41+01:00
New Revision: 41dd07bf5cbfb800797821d1ad32226e5339bcfb
URL: https://github.com/llvm/llvm-project/commit/41dd07bf5cbfb800797821d1ad32226e5339bcfb
DIFF: https://github.com/llvm/llvm-project/commit/41dd07bf5cbfb800797821d1ad32226e5339bcfb.diff
LOG: [AArch64] Add test coverage for bitreverse(logicalshift(bitreverse(x),y)) -> logicalshift(x,y) fold
DAG already performs this fold (#89897); GISel is currently missing it (patch incoming)
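
For reference, the identity the fold relies on: bit i of bitreverse(x) is bit W-1-i of x, so reverse / shift-right / reverse is equivalent to a single shift-left, and likewise with the shift directions swapped. A minimal standalone check of both directions, using Clang's __builtin_bitreverse32 builtin (illustrative only, not part of the committed test):

    #include <assert.h>
    #include <stdint.h>

    /* Checks the identities the new tests target:
     *   bitreverse(lshr(bitreverse(x), s)) == shl(x, s)
     *   bitreverse(shl(bitreverse(x), s))  == lshr(x, s) */
    int main(void) {
      uint32_t x = 0x12345678u;
      for (unsigned s = 0; s < 32; ++s) {
        assert(__builtin_bitreverse32(__builtin_bitreverse32(x) >> s) == x << s);
        assert(__builtin_bitreverse32(__builtin_bitreverse32(x) << s) == x >> s);
      }
      return 0;
    }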
Added:
llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
new file mode 100644
index 0000000000000..3ce94e2c40a9a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-bitreverse-shift.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefixes=SDAG
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown -global-isel | FileCheck %s --check-prefixes=GISEL
+
+; These tests can be optimised
+; fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x)
+; fold (bitreverse(shl (bitreverse c), x)) -> (srl c, x)
+
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
+
+define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_srli_bitreverse_i8:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsl w0, w0, #3
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_srli_bitreverse_i8:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsr w8, w8, #24
+; GISEL-NEXT: lsr w8, w8, #3
+; GISEL-NEXT: rbit w8, w8
+; GISEL-NEXT: lsr w0, w8, #24
+; GISEL-NEXT: ret
+ %1 = call i8 @llvm.bitreverse.i8(i8 %a)
+ %2 = lshr i8 %1, 3
+ %3 = call i8 @llvm.bitreverse.i8(i8 %2)
+ ret i8 %3
+}
+
+define i16 @test_bitreverse_srli_bitreverse_i16(i16 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_srli_bitreverse_i16:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsl w0, w0, #7
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_srli_bitreverse_i16:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsr w8, w8, #16
+; GISEL-NEXT: lsr w8, w8, #7
+; GISEL-NEXT: rbit w8, w8
+; GISEL-NEXT: lsr w0, w8, #16
+; GISEL-NEXT: ret
+ %1 = call i16 @llvm.bitreverse.i16(i16 %a)
+ %2 = lshr i16 %1, 7
+ %3 = call i16 @llvm.bitreverse.i16(i16 %2)
+ ret i16 %3
+}
+
+define i32 @test_bitreverse_srli_bitreverse_i32(i32 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_srli_bitreverse_i32:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsl w0, w0, #15
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_srli_bitreverse_i32:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsr w8, w8, #15
+; GISEL-NEXT: rbit w0, w8
+; GISEL-NEXT: ret
+ %1 = call i32 @llvm.bitreverse.i32(i32 %a)
+ %2 = lshr i32 %1, 15
+ %3 = call i32 @llvm.bitreverse.i32(i32 %2)
+ ret i32 %3
+}
+
+define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_srli_bitreverse_i64:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsl x0, x0, #33
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_srli_bitreverse_i64:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit x8, x0
+; GISEL-NEXT: lsr x8, x8, #33
+; GISEL-NEXT: rbit x0, x8
+; GISEL-NEXT: ret
+ %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+ %2 = lshr i64 %1, 33
+ %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+ ret i64 %3
+}
+
+define i8 @test_bitreverse_shli_bitreverse_i8(i8 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_shli_bitreverse_i8:
+; SDAG: // %bb.0:
+; SDAG-NEXT: ubfx w0, w0, #3, #5
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_shli_bitreverse_i8:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsr w8, w8, #24
+; GISEL-NEXT: lsl w8, w8, #3
+; GISEL-NEXT: rbit w8, w8
+; GISEL-NEXT: lsr w0, w8, #24
+; GISEL-NEXT: ret
+ %1 = call i8 @llvm.bitreverse.i8(i8 %a)
+ %2 = shl i8 %1, 3
+ %3 = call i8 @llvm.bitreverse.i8(i8 %2)
+ ret i8 %3
+}
+
+define i16 @test_bitreverse_shli_bitreverse_i16(i16 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_shli_bitreverse_i16:
+; SDAG: // %bb.0:
+; SDAG-NEXT: ubfx w0, w0, #7, #9
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_shli_bitreverse_i16:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsr w8, w8, #16
+; GISEL-NEXT: lsl w8, w8, #7
+; GISEL-NEXT: rbit w8, w8
+; GISEL-NEXT: lsr w0, w8, #16
+; GISEL-NEXT: ret
+ %1 = call i16 @llvm.bitreverse.i16(i16 %a)
+ %2 = shl i16 %1, 7
+ %3 = call i16 @llvm.bitreverse.i16(i16 %2)
+ ret i16 %3
+}
+
+define i32 @test_bitreverse_shli_bitreverse_i32(i32 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_shli_bitreverse_i32:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsr w0, w0, #15
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_shli_bitreverse_i32:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit w8, w0
+; GISEL-NEXT: lsl w8, w8, #15
+; GISEL-NEXT: rbit w0, w8
+; GISEL-NEXT: ret
+ %1 = call i32 @llvm.bitreverse.i32(i32 %a)
+ %2 = shl i32 %1, 15
+ %3 = call i32 @llvm.bitreverse.i32(i32 %2)
+ ret i32 %3
+}
+
+define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
+; SDAG-LABEL: test_bitreverse_shli_bitreverse_i64:
+; SDAG: // %bb.0:
+; SDAG-NEXT: lsr x0, x0, #33
+; SDAG-NEXT: ret
+;
+; GISEL-LABEL: test_bitreverse_shli_bitreverse_i64:
+; GISEL: // %bb.0:
+; GISEL-NEXT: rbit x8, x0
+; GISEL-NEXT: lsl x8, x8, #33
+; GISEL-NEXT: rbit x0, x8
+; GISEL-NEXT: ret
+ %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+ %2 = shl i64 %1, 33
+ %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+ ret i64 %3
+}
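
Note the SDAG output for the narrow shl cases above: for i8 and i16 the folded lshr must also clear the high bits of the wider register, which AArch64 expresses as a single ubfx bitfield extract. A rough C sketch of what "ubfx w0, w0, #3, #5" computes in the i8 test (hypothetical helper, illustrative only):

    #include <stdint.h>

    /* Extract 5 bits starting at bit 3: an i8 lshr by 3,
     * zero-extended into the 32-bit register. */
    uint32_t i8_shl_fold(uint32_t w0) {
      return (w0 >> 3) & 0x1f;
    }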