[llvm] 780eb9f - [DAGCombine] add tests for bitreverse-shift optimization

Chenbing Zheng via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 29 18:57:50 PDT 2022


Author: Chenbing Zheng
Date: 2022-03-30T09:50:28+08:00
New Revision: 780eb9f5864ff71afa8e2684c2aa2bcdb9bdfad7

URL: https://github.com/llvm/llvm-project/commit/780eb9f5864ff71afa8e2684c2aa2bcdb9bdfad7
DIFF: https://github.com/llvm/llvm-project/commit/780eb9f5864ff71afa8e2684c2aa2bcdb9bdfad7.diff

LOG: [DAGCombine] add tests for bitreverse-shift optimization

This patch adds tests that demonstrate missed optimization
opportunities for bitreverse-shift patterns.
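
Since bitreverse maps bit i to bit width-1-i and is its own inverse, a
right shift sandwiched between two reversals is equivalent to a left
shift of the original value, and vice versa. As an illustrative sanity
check of the two folds (not part of this commit; bitreverse32 below is
a hand-rolled stand-in for llvm.bitreverse.i32), in C++:

#include <cassert>
#include <cstdint>

// Portable stand-in for llvm.bitreverse.i32.
static uint32_t bitreverse32(uint32_t v) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i) {
    r = (r << 1) | (v & 1);
    v >>= 1;
  }
  return r;
}

int main() {
  // Sample the 32-bit value space; try every legal shift amount.
  for (uint64_t n = 0; n < (1ULL << 32); n += 0x10001) {
    uint32_t c = static_cast<uint32_t>(n);
    for (unsigned x = 0; x < 32; ++x) {
      // fold (bitreverse (srl (bitreverse c), x)) -> (shl c, x)
      assert(bitreverse32(bitreverse32(c) >> x) == (c << x));
      // fold (bitreverse (shl (bitreverse c), x)) -> (srl c, x)
      assert(bitreverse32(bitreverse32(c) << x) == (c >> x));
    }
  }
  return 0;
}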

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D121507

Added: 
    llvm/test/CodeGen/RISCV/bitreverse-shift.ll

Modified: 
    llvm/test/CodeGen/X86/combine-bitreverse.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/bitreverse-shift.ll b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll
new file mode 100644
index 0000000000000..b2a2a61afc64e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bitreverse-shift.ll
@@ -0,0 +1,223 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBKB
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBKB
+
+; TODO: These tests can be optimized:
+;       fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x)
+;       fold (bitreverse(shl (bitreverse c), x)) -> (srl c, x)
+
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
+
+define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 27
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 24
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 59
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 56
+; RV64ZBKB-NEXT:    ret
+    %1 = call i8 @llvm.bitreverse.i8(i8 %a)
+    %2 = lshr i8 %1, 3
+    %3 = call i8 @llvm.bitreverse.i8(i8 %2)
+    ret i8 %3
+}
+
+define i16 @test_bitreverse_srli_bitreverse_i16(i16 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 23
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 16
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 55
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 48
+; RV64ZBKB-NEXT:    ret
+    %1 = call i16 @llvm.bitreverse.i16(i16 %a)
+    %2 = lshr i16 %1, 7
+    %3 = call i16 @llvm.bitreverse.i16(i16 %2)
+    ret i16 %3
+}
+
+define i32 @test_bitreverse_srli_bitreverse_i32(i32 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 15
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 47
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 32
+; RV64ZBKB-NEXT:    ret
+    %1 = call i32 @llvm.bitreverse.i32(i32 %a)
+    %2 = lshr i32 %1, 15
+    %3 = call i32 @llvm.bitreverse.i32(i32 %2)
+    ret i32 %3
+}
+
+define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 1
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a1, a0
+; RV32ZBKB-NEXT:    li a0, 0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 33
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    ret
+    %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+    %2 = lshr i64 %1, 33
+    %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+    ret i64 %3
+}
+
+define i8 @test_bitreverse_shli_bitreverse_i8(i8 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_shli_bitreverse_i8:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 21
+; RV32ZBKB-NEXT:    andi a0, a0, 2040
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 24
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_shli_bitreverse_i8:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 53
+; RV64ZBKB-NEXT:    andi a0, a0, 2040
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 56
+; RV64ZBKB-NEXT:    ret
+    %1 = call i8 @llvm.bitreverse.i8(i8 %a)
+    %2 = shl i8 %1, 3
+    %3 = call i8 @llvm.bitreverse.i8(i8 %2)
+    ret i8 %3
+}
+
+define i16 @test_bitreverse_shli_bitreverse_i16(i16 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_shli_bitreverse_i16:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 9
+; RV32ZBKB-NEXT:    andi a0, a0, -128
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 16
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_shli_bitreverse_i16:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 41
+; RV64ZBKB-NEXT:    andi a0, a0, -128
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 48
+; RV64ZBKB-NEXT:    ret
+    %1 = call i16 @llvm.bitreverse.i16(i16 %a)
+    %2 = shl i16 %1, 7
+    %3 = call i16 @llvm.bitreverse.i16(i16 %2)
+    ret i16 %3
+}
+
+define i32 @test_bitreverse_shli_bitreverse_i32(i32 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_shli_bitreverse_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    slli a0, a0, 15
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_shli_bitreverse_i32:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 32
+; RV64ZBKB-NEXT:    slli a0, a0, 15
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 32
+; RV64ZBKB-NEXT:    ret
+    %1 = call i32 @llvm.bitreverse.i32(i32 %a)
+    %2 = shl i32 %1, 15
+    %3 = call i32 @llvm.bitreverse.i32(i32 %2)
+    ret i32 %3
+}
+
+define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
+; RV32ZBKB-LABEL: test_bitreverse_shli_bitreverse_i64:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a1
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    slli a0, a0, 1
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    li a1, 0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_shli_bitreverse_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    slli a0, a0, 33
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    ret
+    %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+    %2 = shl i64 %1, 33
+    %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+    ret i64 %3
+}

diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
index 3c359a5efe795..987fa7732e424 100644
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -6,6 +6,7 @@
 ; actual output is massive at the moment as llvm.bitreverse is not yet legal.
 
 declare i32 @llvm.bitreverse.i32(i32) readnone
+declare i64 @llvm.bitreverse.i64(i64) readnone
 declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone
 
 ; fold (bitreverse undef) -> undef
@@ -37,6 +38,361 @@ define i32 @test_bitreverse_bitreverse(i32 %a0) nounwind {
   ret i32 %c
 }
 
+; TODO: fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x)
+define i32 @test_bitreverse_srli_bitreverse(i32 %a0) nounwind {
+; X86-LABEL: test_bitreverse_srli_bitreverse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655744, %ecx # imm = 0x55555540
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655680, %eax # imm = 0x55555500
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    shrl $7, %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645121, %ecx # imm = 0xF0F0F01
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993424, %ecx # imm = 0x33333310
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993408, %eax # imm = 0x33333300
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_bitreverse_srli_bitreverse:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    bswapl %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    shrl $4, %edi
+; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT:    shrl $2, %edi
+; X64-NEXT:    andl $858993459, %edi # imm = 0x33333333
+; X64-NEXT:    leal (%rdi,%rax,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $1431655744, %ecx # imm = 0x55555540
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655680, %eax # imm = 0x55555500
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    shrl $7, %eax
+; X64-NEXT:    bswapl %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $252645121, %ecx # imm = 0xF0F0F01
+; X64-NEXT:    shll $4, %ecx
+; X64-NEXT:    shrl $4, %eax
+; X64-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
+; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $858993424, %ecx # imm = 0x33333310
+; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    andl $858993408, %eax # imm = 0x33333300
+; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    retq
+  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
+  %c = lshr i32 %b, 7
+  %d = call i32 @llvm.bitreverse.i32(i32 %c)
+  ret i32 %d
+}
+
+define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
+; X86-LABEL: test_bitreverse_srli_bitreverse_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655764, %eax # imm = 0x55555554
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645127, %eax # imm = 0xF0F0F07
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993457, %eax # imm = 0x33333331
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_bitreverse_srli_bitreverse_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    bswapq %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    andq %rcx, %rdi
+; X64-NEXT:    shlq $4, %rdi
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    shrq $2, %rdi
+; X64-NEXT:    andq %rax, %rdi
+; X64-NEXT:    leaq (%rdi,%rcx,4), %rax
+; X64-NEXT:    movabsq $6148914689804861440, %rcx # imm = 0x5555555500000000
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    movabsq $6148914685509894144, %rdx # imm = 0x5555555400000000
+; X64-NEXT:    andq %rax, %rdx
+; X64-NEXT:    leaq (%rdx,%rcx,2), %rax
+; X64-NEXT:    shrq $33, %rax
+; X64-NEXT:    bswapq %rax
+; X64-NEXT:    movabsq $1085102592318504960, %rcx # imm = 0xF0F0F0F00000000
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    movabsq $1085102557958766592, %rdx # imm = 0xF0F0F0700000000
+; X64-NEXT:    andq %rax, %rdx
+; X64-NEXT:    shlq $4, %rcx
+; X64-NEXT:    orq %rdx, %rcx
+; X64-NEXT:    movabsq $3689348813882916864, %rax # imm = 0x3333333300000000
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    shrq $2, %rcx
+; X64-NEXT:    movabsq $3689348805292982272, %rdx # imm = 0x3333333100000000
+; X64-NEXT:    andq %rcx, %rdx
+; X64-NEXT:    leaq (%rdx,%rax,4), %rax
+; X64-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    andq %rcx, %rdx
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    leaq (%rax,%rdx,2), %rax
+; X64-NEXT:    retq
+    %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+    %2 = lshr i64 %1, 33
+    %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+    ret i64 %3
+}
+
+; TODO: fold (bitreverse(shl (bitreverse c), x)) -> (srl c, x)
+define i32 @test_bitreverse_shli_bitreverse(i32 %a0) nounwind {
+; X86-LABEL: test_bitreverse_shli_bitreverse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $5592405, %ecx # imm = 0x555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $22369621, %eax # imm = 0x1555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    shll $7, %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $986895, %ecx # imm = 0xF0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $135204623, %eax # imm = 0x80F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $3355443, %ecx # imm = 0x333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $36909875, %eax # imm = 0x2333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_bitreverse_shli_bitreverse:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    bswapl %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    shrl $4, %edi
+; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT:    shrl $2, %edi
+; X64-NEXT:    andl $858993459, %edi # imm = 0x33333333
+; X64-NEXT:    leal (%rdi,%rax,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $5592405, %ecx # imm = 0x555555
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $22369621, %eax # imm = 0x1555555
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    shll $7, %eax
+; X64-NEXT:    bswapl %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $986895, %ecx # imm = 0xF0F0F
+; X64-NEXT:    shll $4, %ecx
+; X64-NEXT:    shrl $4, %eax
+; X64-NEXT:    andl $135204623, %eax # imm = 0x80F0F0F
+; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $3355443, %ecx # imm = 0x333333
+; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    andl $36909875, %eax # imm = 0x2333333
+; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    retq
+  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
+  %c = shl i32 %b, 7
+  %d = call i32 @llvm.bitreverse.i32(i32 %c)
+  ret i32 %d
+}
+
+define i64 @test_bitreverse_shli_bitreverse_i64(i64 %a) nounwind {
+; X86-LABEL: test_bitreverse_shli_bitreverse_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $357913941, %ecx # imm = 0x15555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    addl %eax, %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $235867919, %ecx # imm = 0xE0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $590558003, %ecx # imm = 0x23333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_bitreverse_shli_bitreverse_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    bswapq %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    andq %rcx, %rdi
+; X64-NEXT:    shlq $4, %rdi
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    shrq $2, %rdi
+; X64-NEXT:    andq %rax, %rdi
+; X64-NEXT:    leaq (%rdi,%rcx,4), %rax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $357913941, %ecx # imm = 0x15555555
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    shlq $33, %rax
+; X64-NEXT:    bswapq %rax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $235867919, %ecx # imm = 0xE0F0F0F
+; X64-NEXT:    shlq $4, %rcx
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $590558003, %ecx # imm = 0x23333333
+; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT:    leaq (%rax,%rcx,4), %rax
+; X64-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
+; X64-NEXT:    movq %rax, %rdx
+; X64-NEXT:    andq %rcx, %rdx
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    andq %rcx, %rax
+; X64-NEXT:    leaq (%rax,%rdx,2), %rax
+; X64-NEXT:    retq
+    %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+    %2 = shl i64 %1, 33
+    %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+    ret i64 %3
+}
+
 define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
 ; X86-LABEL: test_demandedbits_bitreverse:
 ; X86:       # %bb.0:
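
To regenerate the CHECK lines after modifying either test file, the
update script named in the NOTE headers can be rerun, e.g. (paths here
are an assumption, for an in-tree build with llc already built):

llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
    llvm/test/CodeGen/RISCV/bitreverse-shift.ll \
    llvm/test/CodeGen/X86/combine-bitreverse.ll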