[llvm] 88f33cf - [RISCV] Add bitreverse tests to bswap-ctlz-cttz-ctpop.ll. Add Zbb command lines. NFC

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Sun Jan 23 13:46:24 PST 2022


Author: Craig Topper
Date: 2022-01-23T13:41:58-08:00
New Revision: 88f33cff4bee87ea31e129f734df232274098a78

URL: https://github.com/llvm/llvm-project/commit/88f33cff4bee87ea31e129f734df232274098a78
DIFF: https://github.com/llvm/llvm-project/commit/88f33cff4bee87ea31e129f734df232274098a78.diff

LOG: [RISCV] Add bitreverse tests to bswap-ctlz-cttz-ctpop.ll. Add Zbb command lines. NFC

Rename the file to include bitreverse. Add additional tests and Zbb command lines.

There are some tests that overlap with rv32zbb.ll and rv64zbb.ll. Maybe
I'll clean that up in a future patch.
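
For context: without Zbb, llvm.bitreverse expands to the classic
shift-and-mask swap sequence that the scalar checks below encode; with
Zbb, the byte-reversal half of the work collapses into a single rev8 for
the multi-byte cases, leaving only the in-byte swaps. A minimal C sketch
of the scalar idea (illustration only, not code from this patch; the
masks 0x0F/0x33/0x55 are the andi constants 15/51/85 in the generated
code):

    #include <stdint.h>

    // Reverse the bits of a byte by swapping progressively smaller
    // groups: nibbles, then 2-bit pairs, then adjacent bits.
    uint8_t bitreverse8(uint8_t x) {
      x = (uint8_t)(((x & 0x0F) << 4) | (x >> 4));          // swap nibbles
      x = (uint8_t)(((x & 0x33) << 2) | ((x >> 2) & 0x33)); // swap 2-bit pairs
      x = (uint8_t)(((x & 0x55) << 1) | ((x >> 1) & 0x55)); // swap bits
      return x;
    }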

Added: 
    llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll
similarity index 58%
rename from llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
rename to llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll
index 4be19eefa948f..8a0c0db7aaa38 100644
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll
@@ -3,10 +3,18 @@
 ; RUN:   | FileCheck %s -check-prefix=RV32I
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBB
 
 declare i16 @llvm.bswap.i16(i16)
 declare i32 @llvm.bswap.i32(i32)
 declare i64 @llvm.bswap.i64(i64)
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
 declare i8 @llvm.cttz.i8(i8, i1)
 declare i16 @llvm.cttz.i16(i16, i1)
 declare i32 @llvm.cttz.i32(i32, i1)
@@ -31,6 +39,18 @@ define i16 @test_bswap_i16(i16 %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bswap_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a0, a0, 16
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bswap_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a0, a0, 48
+; RV64ZBB-NEXT:    ret
   %tmp = call i16 @llvm.bswap.i16(i16 %a)
   ret i16 %tmp
 }
@@ -67,6 +87,17 @@ define i32 @test_bswap_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bswap_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bswap_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a0, a0, 32
+; RV64ZBB-NEXT:    ret
   %tmp = call i32 @llvm.bswap.i32(i32 %a)
   ret i32 %tmp
 }
@@ -129,15 +160,542 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bswap_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a2, a1
+; RV32ZBB-NEXT:    rev8 a1, a0
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bswap_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i64 @llvm.bswap.i64(i64 %a)
   ret i64 %tmp
 }
 
+define i8 @test_bitreverse_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a1, a0, 15
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srli a0, a0, 28
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 51
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 51
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 85
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    andi a0, a0, 85
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a1, a0, 15
+; RV64I-NEXT:    slli a1, a1, 4
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srli a0, a0, 60
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 51
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 51
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 85
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    andi a0, a0, 85
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a1, a0, 15
+; RV32ZBB-NEXT:    slli a1, a1, 4
+; RV32ZBB-NEXT:    slli a0, a0, 24
+; RV32ZBB-NEXT:    srli a0, a0, 28
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    andi a1, a0, 51
+; RV32ZBB-NEXT:    slli a1, a1, 2
+; RV32ZBB-NEXT:    srli a0, a0, 2
+; RV32ZBB-NEXT:    andi a0, a0, 51
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    andi a1, a0, 85
+; RV32ZBB-NEXT:    slli a1, a1, 1
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    andi a0, a0, 85
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a1, a0, 15
+; RV64ZBB-NEXT:    slli a1, a1, 4
+; RV64ZBB-NEXT:    slli a0, a0, 56
+; RV64ZBB-NEXT:    srli a0, a0, 60
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    andi a1, a0, 51
+; RV64ZBB-NEXT:    slli a1, a1, 2
+; RV64ZBB-NEXT:    srli a0, a0, 2
+; RV64ZBB-NEXT:    andi a0, a0, 51
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    andi a1, a0, 85
+; RV64ZBB-NEXT:    slli a1, a1, 1
+; RV64ZBB-NEXT:    srli a0, a0, 1
+; RV64ZBB-NEXT:    andi a0, a0, 85
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %tmp = call i8 @llvm.bitreverse.i8(i8 %a)
+  ret i8 %tmp
+}
+
+define i16 @test_bitreverse_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 3
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 3
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 12
+; RV32ZBB-NEXT:    lui a2, 15
+; RV32ZBB-NEXT:    addi a2, a2, 240
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    srli a0, a0, 20
+; RV32ZBB-NEXT:    andi a0, a0, -241
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a2, 3
+; RV32ZBB-NEXT:    addi a2, a2, 819
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a2, 5
+; RV32ZBB-NEXT:    addi a2, a2, 1365
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 44
+; RV64ZBB-NEXT:    lui a2, 15
+; RV64ZBB-NEXT:    addiw a2, a2, 240
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    srli a0, a0, 52
+; RV64ZBB-NEXT:    andi a0, a0, -241
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a2, 3
+; RV64ZBB-NEXT:    addiw a2, a2, 819
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a2, 5
+; RV64ZBB-NEXT:    addiw a2, a2, 1365
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bitreverse_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 61681
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a1, a0, 8
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srliw a2, a0, 24
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    slliw a0, a0, 24
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    lui a2, 61681
+; RV32ZBB-NEXT:    addi a2, a2, -241
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a2, 209715
+; RV32ZBB-NEXT:    addi a2, a2, 819
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a2, 349525
+; RV32ZBB-NEXT:    addi a2, a2, 1365
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 36
+; RV64ZBB-NEXT:    lui a2, 61681
+; RV64ZBB-NEXT:    addiw a2, a2, -241
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    srli a0, a0, 28
+; RV64ZBB-NEXT:    lui a2, 986895
+; RV64ZBB-NEXT:    addiw a2, a2, 240
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a2, 209715
+; RV64ZBB-NEXT:    addiw a2, a2, 819
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slliw a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a2, 349525
+; RV64ZBB-NEXT:    addiw a2, a2, 1365
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slliw a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bitreverse_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a1, 8
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -256
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a4, a1, 24
+; RV32I-NEXT:    or a2, a2, a4
+; RV32I-NEXT:    slli a4, a1, 8
+; RV32I-NEXT:    lui a5, 4080
+; RV32I-NEXT:    and a4, a4, a5
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    lui a4, 61681
+; RV32I-NEXT:    addi a4, a4, -241
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 2
+; RV32I-NEXT:    lui a6, 209715
+; RV32I-NEXT:    addi a6, a6, 819
+; RV32I-NEXT:    and a2, a2, a6
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    lui a7, 349525
+; RV32I-NEXT:    addi a7, a7, 1365
+; RV32I-NEXT:    and a2, a2, a7
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a2, a2, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    and a0, a0, a6
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    and a0, a0, a7
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 24
+; RV64I-NEXT:    lui a2, 4080
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    li a3, 255
+; RV64I-NEXT:    slli a4, a3, 24
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    srli a2, a0, 40
+; RV64I-NEXT:    lui a4, 16
+; RV64I-NEXT:    addiw a4, a4, -256
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srli a4, a0, 56
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    slli a4, a3, 40
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srliw a4, a0, 24
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    lui a3, %hi(.LCPI6_0)
+; RV64I-NEXT:    ld a3, %lo(.LCPI6_0)(a3)
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    lui a2, %hi(.LCPI6_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI6_1)(a2)
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a2, %hi(.LCPI6_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI6_2)(a2)
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a1, a1
+; RV32ZBB-NEXT:    srli a2, a1, 4
+; RV32ZBB-NEXT:    lui a3, 61681
+; RV32ZBB-NEXT:    addi a3, a3, -241
+; RV32ZBB-NEXT:    and a2, a2, a3
+; RV32ZBB-NEXT:    and a1, a1, a3
+; RV32ZBB-NEXT:    slli a1, a1, 4
+; RV32ZBB-NEXT:    or a1, a2, a1
+; RV32ZBB-NEXT:    srli a2, a1, 2
+; RV32ZBB-NEXT:    lui a4, 209715
+; RV32ZBB-NEXT:    addi a4, a4, 819
+; RV32ZBB-NEXT:    and a2, a2, a4
+; RV32ZBB-NEXT:    and a1, a1, a4
+; RV32ZBB-NEXT:    slli a1, a1, 2
+; RV32ZBB-NEXT:    or a1, a2, a1
+; RV32ZBB-NEXT:    srli a2, a1, 1
+; RV32ZBB-NEXT:    lui a5, 349525
+; RV32ZBB-NEXT:    addi a5, a5, 1365
+; RV32ZBB-NEXT:    and a2, a2, a5
+; RV32ZBB-NEXT:    and a1, a1, a5
+; RV32ZBB-NEXT:    slli a1, a1, 1
+; RV32ZBB-NEXT:    or a2, a2, a1
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    and a1, a1, a3
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    and a1, a1, a4
+; RV32ZBB-NEXT:    and a0, a0, a4
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    and a1, a1, a5
+; RV32ZBB-NEXT:    and a0, a0, a5
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a1, a1, a0
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a2, a0, 4
+; RV64ZBB-NEXT:    and a2, a2, a1
+; RV64ZBB-NEXT:    and a0, a0, a1
+; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_1)
+; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_1)(a1)
+; RV64ZBB-NEXT:    slli a0, a0, 4
+; RV64ZBB-NEXT:    or a0, a2, a0
+; RV64ZBB-NEXT:    srli a2, a0, 2
+; RV64ZBB-NEXT:    and a2, a2, a1
+; RV64ZBB-NEXT:    and a0, a0, a1
+; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_2)
+; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_2)(a1)
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a2, a0
+; RV64ZBB-NEXT:    srli a2, a0, 1
+; RV64ZBB-NEXT:    and a2, a2, a1
+; RV64ZBB-NEXT:    and a0, a0, a1
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a2, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
+  ret i64 %tmp
+}
+
 define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a1, a0, 255
-; RV32I-NEXT:    beqz a1, .LBB3_2
+; RV32I-NEXT:    beqz a1, .LBB7_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
@@ -153,14 +711,14 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    andi a0, a0, 15
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:  .LBB7_2:
 ; RV32I-NEXT:    li a0, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a1, a0, 255
-; RV64I-NEXT:    beqz a1, .LBB3_2
+; RV64I-NEXT:    beqz a1, .LBB7_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
@@ -176,9 +734,21 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    andi a0, a0, 15
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    li a0, 8
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ori a0, a0, 256
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ori a0, a0, 256
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
   ret i8 %tmp
 }
@@ -188,7 +758,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a0, 16
 ; RV32I-NEXT:    srli a1, a1, 16
-; RV32I-NEXT:    beqz a1, .LBB4_2
+; RV32I-NEXT:    beqz a1, .LBB8_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
 ; RV32I-NEXT:    not a0, a0
@@ -214,7 +784,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-NEXT:    slli a0, a0, 19
 ; RV32I-NEXT:    srli a0, a0, 27
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:  .LBB8_2:
 ; RV32I-NEXT:    li a0, 16
 ; RV32I-NEXT:    ret
 ;
@@ -222,7 +792,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a0, 48
 ; RV64I-NEXT:    srli a1, a1, 48
-; RV64I-NEXT:    beqz a1, .LBB4_2
+; RV64I-NEXT:    beqz a1, .LBB8_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
@@ -248,9 +818,23 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 51
 ; RV64I-NEXT:    srli a0, a0, 59
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    li a0, 16
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a1, 16
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, 16
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
   ret i16 %tmp
 }
@@ -258,7 +842,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB5_2
+; RV32I-NEXT:    beqz a0, .LBB9_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
@@ -288,14 +872,14 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB5_2:
+; RV32I-NEXT:  .LBB9_2:
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB5_2
+; RV64I-NEXT:    beqz a1, .LBB9_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
@@ -325,9 +909,19 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB5_2:
+; RV64I-NEXT:  .LBB9_2:
 ; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctzw a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
   ret i32 %tmp
 }
@@ -335,7 +929,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-LABEL: test_ctlz_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB6_2
+; RV32I-NEXT:    beqz a0, .LBB10_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
@@ -373,14 +967,14 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB6_2:
+; RV32I-NEXT:  .LBB10_2:
 ; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_ctlz_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB6_2
+; RV64I-NEXT:    beqz a1, .LBB10_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
@@ -418,9 +1012,19 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB6_2:
+; RV64I-NEXT:  .LBB10_2:
 ; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctlz_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    clz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctlz_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    clzw a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
   ret i32 %tmp
 }
@@ -478,14 +1082,14 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s2, .LBB7_2
+; RV32I-NEXT:    bnez s2, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB7_3
-; RV32I-NEXT:  .LBB7_2:
+; RV32I-NEXT:    j .LBB11_3
+; RV32I-NEXT:  .LBB11_2:
 ; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB7_3:
+; RV32I-NEXT:  .LBB11_3:
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -500,39 +1104,57 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: test_cttz_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a0, .LBB7_2
+; RV64I-NEXT:    beqz a0, .LBB11_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI7_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI11_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI11_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI11_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI11_1)(a2)
 ; RV64I-NEXT:    srli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    and a1, a0, a2
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI7_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI7_2)(a2)
+; RV64I-NEXT:    lui a2, %hi(.LCPI11_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI11_2)(a2)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srli a1, a0, 4
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI7_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI7_3)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI11_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI11_3)(a1)
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB7_2:
+; RV64I-NEXT:  .LBB11_2:
 ; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bnez a0, .LBB11_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    ctz a0, a1
+; RV32ZBB-NEXT:    addi a0, a0, 32
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB11_2:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
   ret i64 %tmp
 }
@@ -571,6 +1193,16 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    andi a0, a0, 15
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i8_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i8_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
   ret i8 %tmp
 }
@@ -629,6 +1261,16 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
 ; RV64I-NEXT:    slli a0, a0, 51
 ; RV64I-NEXT:    srli a0, a0, 59
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i16_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i16_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
   ret i16 %tmp
 }
@@ -695,6 +1337,16 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i32_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i32_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctzw a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
   ret i32 %tmp
 }
@@ -752,14 +1404,14 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, s6
 ; RV32I-NEXT:    mv a1, s3
 ; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s2, .LBB11_2
+; RV32I-NEXT:    bnez s2, .LBB15_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB11_3
-; RV32I-NEXT:  .LBB11_2:
+; RV32I-NEXT:    j .LBB15_3
+; RV32I-NEXT:  .LBB15_2:
 ; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB11_3:
+; RV32I-NEXT:  .LBB15_3:
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -779,29 +1431,47 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI11_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI11_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI11_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI11_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI15_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI15_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI15_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI15_1)(a2)
 ; RV64I-NEXT:    srli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    and a1, a0, a2
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI11_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI11_2)(a2)
+; RV64I-NEXT:    lui a2, %hi(.LCPI15_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI15_2)(a2)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srli a1, a0, 4
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI11_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI11_3)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI15_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI15_3)(a1)
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i64_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bnez a0, .LBB15_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    ctz a0, a1
+; RV32ZBB-NEXT:    addi a0, a0, 32
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB15_2:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i64_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
   ret i64 %tmp
 }
@@ -862,6 +1532,16 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctpop_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctpop_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpopw a0, a0
+; RV64ZBB-NEXT:    ret
   %1 = call i32 @llvm.ctpop.i32(i32 %a)
   ret i32 %1
 }
@@ -928,29 +1608,42 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, %hi(.LCPI13_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI13_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI13_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI13_1)(a2)
+; RV64I-NEXT:    lui a1, %hi(.LCPI17_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI17_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI17_1)(a2)
 ; RV64I-NEXT:    srli a3, a0, 1
 ; RV64I-NEXT:    and a1, a3, a1
 ; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    and a1, a0, a2
 ; RV64I-NEXT:    srli a0, a0, 2
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI13_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI13_2)(a2)
+; RV64I-NEXT:    lui a2, %hi(.LCPI17_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI17_2)(a2)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srli a1, a0, 4
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI13_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI13_3)(a1)
+; RV64I-NEXT:    lui a1, %hi(.LCPI17_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI17_3)(a1)
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    srli a0, a0, 56
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctpop_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a1, a1
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctpop_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpop a0, a0
+; RV64ZBB-NEXT:    ret
   %1 = call i64 @llvm.ctpop.i64(i64 %a)
   ret i64 %1
 }
@@ -987,6 +1680,18 @@ define i32 @test_parity_i32(i32 %a) {
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_parity_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    andi a0, a0, 1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_parity_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpopw a0, a0
+; RV64ZBB-NEXT:    andi a0, a0, 1
+; RV64ZBB-NEXT:    ret
   %1 = call i32 @llvm.ctpop.i32(i32 %a)
   %2 = and i32 %1, 1
   ret i32 %2
@@ -1026,6 +1731,20 @@ define i64 @test_parity_i64(i64 %a) {
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_parity_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    andi a0, a0, 1
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_parity_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpop a0, a0
+; RV64ZBB-NEXT:    andi a0, a0, 1
+; RV64ZBB-NEXT:    ret
   %1 = call i64 @llvm.ctpop.i64(i64 %a)
   %2 = and i64 %1, 1
   ret i64 %2
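
Two of the Zbb lowerings above rest on small identities worth spelling
out. A hedged C sketch using compiler builtins (illustrative, not from
the patch):

    #include <stdint.h>

    // cttz on a narrow type: OR in a bit just above the value's width so
    // a full-width ctz yields the type's bit width for a zero input.
    // This is the `ori a0, a0, 256` + `ctz` sequence in test_cttz_i8.
    unsigned cttz8(uint8_t x) {
      return (unsigned)__builtin_ctz((unsigned)x | 0x100); // 8 when x == 0
    }

    // Parity is the low bit of the population count, hence the
    // `cpop`/`cpopw` followed by `andi ..., 1` in test_parity_i32/i64.
    unsigned parity32(uint32_t x) {
      return (unsigned)__builtin_popcount(x) & 1u;
    }

Note that Zbb's clz/ctz are defined to return XLEN for a zero input,
which is why test_cttz_i32 on RV32ZBB is a bare ctz with no zero check,
while the plain RV32I/RV64I expansions need a branch.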