[llvm] dcd751b - [RISCV] Split bswap-bitreverse-ctlz-cttz-ctpop.ll into two files: bswap/bitreverse and ctlz/cttz/ctpop. NFC

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Thu Jan 27 21:12:20 PST 2022


Author: Craig Topper
Date: 2022-01-27T21:12:03-08:00
New Revision: dcd751b26ea612b57e4d74568959027cadb2b4b8

URL: https://github.com/llvm/llvm-project/commit/dcd751b26ea612b57e4d74568959027cadb2b4b8
DIFF: https://github.com/llvm/llvm-project/commit/dcd751b26ea612b57e4d74568959027cadb2b4b8.diff

LOG: [RISCV] Split bswap-bitreverse-ctlz-cttz-ctpop.ll into two files: bswap/bitreverse and ctlz/cttz/ctpop. NFC

Add Zbkb command lines to the bswap/bitreverse test.
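
For reference, a minimal sketch of what such RUN lines look like, modeled
on the existing ones in the deleted file below and assuming the +zbkb
subtarget feature and illustrative RV32ZBKB/RV64ZBKB check prefixes (the
exact prefixes used are in the new bswap-bitreverse.ll):

; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32ZBKB
; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64ZBKB

Zbkb provides rev8 (byte reverse) and brev8 (bit reverse within each
byte), so the multi-instruction bswap/bitreverse expansions shown in the
deleted file below shrink to short fixed sequences under those run lines.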

Added: 
    llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll
deleted file mode 100644
index f2f0494a6fb8..000000000000
--- a/llvm/test/CodeGen/RISCV/bswap-bitreverse-ctlz-cttz-ctpop.ll
+++ /dev/null
@@ -1,2441 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32ZBB
-; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBB
-
-declare i16 @llvm.bswap.i16(i16)
-declare i32 @llvm.bswap.i32(i32)
-declare i64 @llvm.bswap.i64(i64)
-declare i8 @llvm.bitreverse.i8(i8)
-declare i16 @llvm.bitreverse.i16(i16)
-declare i32 @llvm.bitreverse.i32(i32)
-declare i64 @llvm.bitreverse.i64(i64)
-declare i8 @llvm.cttz.i8(i8, i1)
-declare i16 @llvm.cttz.i16(i16, i1)
-declare i32 @llvm.cttz.i32(i32, i1)
-declare i64 @llvm.cttz.i64(i64, i1)
-declare i32 @llvm.ctlz.i32(i32, i1)
-declare i32 @llvm.ctpop.i32(i32)
-declare i64 @llvm.ctpop.i64(i64)
-
-define i16 @test_bswap_i16(i16 %a) nounwind {
-; RV32I-LABEL: test_bswap_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a0, a0
-; RV32ZBB-NEXT:    srli a0, a0, 16
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    srli a0, a0, 48
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.bswap.i16(i16 %a)
-  ret i16 %tmp
-}
-
-define i32 @test_bswap_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_bswap_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 24
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srliw a2, a0, 24
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    slliw a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    srli a0, a0, 32
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.bswap.i32(i32 %a)
-  ret i32 %tmp
-}
-
-define i64 @test_bswap_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_bswap_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a1, 8
-; RV32I-NEXT:    lui a3, 16
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a4, a1, 24
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    slli a4, a1, 8
-; RV32I-NEXT:    lui a5, 4080
-; RV32I-NEXT:    and a4, a4, a5
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a4
-; RV32I-NEXT:    or a2, a1, a2
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    srli a3, a0, 24
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a3, a0, 8
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a1, a0, a1
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 24
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    li a3, 255
-; RV64I-NEXT:    slli a4, a3, 24
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a2, a0, 40
-; RV64I-NEXT:    lui a4, 16
-; RV64I-NEXT:    addiw a4, a4, -256
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srli a4, a0, 56
-; RV64I-NEXT:    or a2, a2, a4
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 24
-; RV64I-NEXT:    slli a4, a3, 40
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a2, a2, a4
-; RV64I-NEXT:    slli a4, a0, 40
-; RV64I-NEXT:    slli a3, a3, 48
-; RV64I-NEXT:    and a3, a4, a3
-; RV64I-NEXT:    slli a0, a0, 56
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a2, a1
-; RV32ZBB-NEXT:    rev8 a1, a0
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.bswap.i64(i64 %a)
-  ret i64 %tmp
-}
-
-define i8 @test_bitreverse_i8(i8 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_i8:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a1, a0, 15
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    srli a0, a0, 28
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    andi a1, a0, 51
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    andi a0, a0, 51
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    andi a1, a0, 85
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    andi a0, a0, 85
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_i8:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a1, a0, 15
-; RV64I-NEXT:    slli a1, a1, 4
-; RV64I-NEXT:    slli a0, a0, 56
-; RV64I-NEXT:    srli a0, a0, 60
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    andi a1, a0, 51
-; RV64I-NEXT:    slli a1, a1, 2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    andi a0, a0, 51
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    andi a1, a0, 85
-; RV64I-NEXT:    slli a1, a1, 1
-; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    andi a0, a0, 85
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_i8:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andi a1, a0, 15
-; RV32ZBB-NEXT:    slli a1, a1, 4
-; RV32ZBB-NEXT:    slli a0, a0, 24
-; RV32ZBB-NEXT:    srli a0, a0, 28
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    andi a1, a0, 51
-; RV32ZBB-NEXT:    slli a1, a1, 2
-; RV32ZBB-NEXT:    srli a0, a0, 2
-; RV32ZBB-NEXT:    andi a0, a0, 51
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    andi a1, a0, 85
-; RV32ZBB-NEXT:    slli a1, a1, 1
-; RV32ZBB-NEXT:    srli a0, a0, 1
-; RV32ZBB-NEXT:    andi a0, a0, 85
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_i8:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andi a1, a0, 15
-; RV64ZBB-NEXT:    slli a1, a1, 4
-; RV64ZBB-NEXT:    slli a0, a0, 56
-; RV64ZBB-NEXT:    srli a0, a0, 60
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    andi a1, a0, 51
-; RV64ZBB-NEXT:    slli a1, a1, 2
-; RV64ZBB-NEXT:    srli a0, a0, 2
-; RV64ZBB-NEXT:    andi a0, a0, 51
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    andi a1, a0, 85
-; RV64ZBB-NEXT:    slli a1, a1, 1
-; RV64ZBB-NEXT:    srli a0, a0, 1
-; RV64ZBB-NEXT:    andi a0, a0, 85
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    ret
-  %tmp = call i8 @llvm.bitreverse.i8(i8 %a)
-  ret i8 %tmp
-}
-
-define i16 @test_bitreverse_i16(i16 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 1
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 3
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 48
-; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 1
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 3
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a0, a0
-; RV32ZBB-NEXT:    srli a1, a0, 12
-; RV32ZBB-NEXT:    lui a2, 15
-; RV32ZBB-NEXT:    addi a2, a2, 240
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    srli a0, a0, 20
-; RV32ZBB-NEXT:    andi a0, a0, -241
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 3
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 5
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    srli a1, a0, 44
-; RV64ZBB-NEXT:    lui a2, 15
-; RV64ZBB-NEXT:    addiw a2, a2, 240
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    srli a0, a0, 52
-; RV64ZBB-NEXT:    andi a0, a0, -241
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 3
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 5
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
-  ret i16 %tmp
-}
-
-define i32 @test_bitreverse_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    srli a2, a0, 24
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    slli a2, a0, 8
-; RV32I-NEXT:    lui a3, 4080
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a1, a0, 8
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -256
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srliw a2, a0, 24
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    lui a3, 4080
-; RV64I-NEXT:    and a2, a2, a3
-; RV64I-NEXT:    slliw a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a0, a0
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    lui a2, 61681
-; RV32ZBB-NEXT:    addi a2, a2, -241
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 209715
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 349525
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    srli a1, a0, 36
-; RV64ZBB-NEXT:    lui a2, 61681
-; RV64ZBB-NEXT:    addiw a2, a2, -241
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    srli a0, a0, 28
-; RV64ZBB-NEXT:    lui a2, 986895
-; RV64ZBB-NEXT:    addiw a2, a2, 240
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    sext.w a0, a0
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 209715
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 349525
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
-  ret i32 %tmp
-}
-
-define i64 @test_bitreverse_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a1, 8
-; RV32I-NEXT:    lui a3, 16
-; RV32I-NEXT:    addi a3, a3, -256
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    srli a4, a1, 24
-; RV32I-NEXT:    or a2, a2, a4
-; RV32I-NEXT:    slli a4, a1, 8
-; RV32I-NEXT:    lui a5, 4080
-; RV32I-NEXT:    and a4, a4, a5
-; RV32I-NEXT:    slli a1, a1, 24
-; RV32I-NEXT:    or a1, a1, a4
-; RV32I-NEXT:    or a1, a1, a2
-; RV32I-NEXT:    srli a2, a1, 4
-; RV32I-NEXT:    lui a4, 61681
-; RV32I-NEXT:    addi a4, a4, -241
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    lui a6, 209715
-; RV32I-NEXT:    addi a6, a6, 819
-; RV32I-NEXT:    and a2, a2, a6
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 1
-; RV32I-NEXT:    lui a7, 349525
-; RV32I-NEXT:    addi a7, a7, 1365
-; RV32I-NEXT:    and a2, a2, a7
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a2, a2, a1
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    srli a3, a0, 24
-; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:    slli a3, a0, 8
-; RV32I-NEXT:    and a3, a3, a5
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    and a1, a1, a6
-; RV32I-NEXT:    and a0, a0, a6
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, a7
-; RV32I-NEXT:    and a0, a0, a7
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a1, a1, a0
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 24
-; RV64I-NEXT:    lui a2, 4080
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    li a3, 255
-; RV64I-NEXT:    slli a4, a3, 24
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    srli a2, a0, 40
-; RV64I-NEXT:    lui a4, 16
-; RV64I-NEXT:    addiw a4, a4, -256
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srli a4, a0, 56
-; RV64I-NEXT:    or a2, a2, a4
-; RV64I-NEXT:    or a1, a1, a2
-; RV64I-NEXT:    slli a2, a0, 24
-; RV64I-NEXT:    slli a4, a3, 40
-; RV64I-NEXT:    and a2, a2, a4
-; RV64I-NEXT:    srliw a4, a0, 24
-; RV64I-NEXT:    slli a4, a4, 32
-; RV64I-NEXT:    or a2, a2, a4
-; RV64I-NEXT:    slli a4, a0, 40
-; RV64I-NEXT:    slli a3, a3, 48
-; RV64I-NEXT:    and a3, a4, a3
-; RV64I-NEXT:    slli a0, a0, 56
-; RV64I-NEXT:    or a0, a0, a3
-; RV64I-NEXT:    lui a3, %hi(.LCPI6_0)
-; RV64I-NEXT:    ld a3, %lo(.LCPI6_0)(a3)
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    lui a2, %hi(.LCPI6_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI6_1)(a2)
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI6_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI6_2)(a2)
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    rev8 a1, a1
-; RV32ZBB-NEXT:    srli a2, a1, 4
-; RV32ZBB-NEXT:    lui a3, 61681
-; RV32ZBB-NEXT:    addi a3, a3, -241
-; RV32ZBB-NEXT:    and a2, a2, a3
-; RV32ZBB-NEXT:    and a1, a1, a3
-; RV32ZBB-NEXT:    slli a1, a1, 4
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 2
-; RV32ZBB-NEXT:    lui a4, 209715
-; RV32ZBB-NEXT:    addi a4, a4, 819
-; RV32ZBB-NEXT:    and a2, a2, a4
-; RV32ZBB-NEXT:    and a1, a1, a4
-; RV32ZBB-NEXT:    slli a1, a1, 2
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 1
-; RV32ZBB-NEXT:    lui a5, 349525
-; RV32ZBB-NEXT:    addi a5, a5, 1365
-; RV32ZBB-NEXT:    and a2, a2, a5
-; RV32ZBB-NEXT:    and a1, a1, a5
-; RV32ZBB-NEXT:    slli a1, a1, 1
-; RV32ZBB-NEXT:    or a2, a2, a1
-; RV32ZBB-NEXT:    rev8 a0, a0
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    and a1, a1, a3
-; RV32ZBB-NEXT:    and a0, a0, a3
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    and a1, a1, a4
-; RV32ZBB-NEXT:    and a0, a0, a4
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    and a1, a1, a5
-; RV32ZBB-NEXT:    and a0, a0, a5
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a1, a1, a0
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_0)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
-; RV64ZBB-NEXT:    rev8 a0, a0
-; RV64ZBB-NEXT:    srli a2, a0, 4
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_1)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_1)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 2
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI6_2)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI6_2)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 1
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
-  ret i64 %tmp
-}
-
-define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
-; RV32I-LABEL: test_bswap_bitreverse_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 1
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 3
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_bitreverse_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 1
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 3
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_bitreverse_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    lui a2, 1
-; RV32ZBB-NEXT:    addi a2, a2, -241
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 3
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 5
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_bitreverse_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    srli a1, a0, 4
-; RV64ZBB-NEXT:    lui a2, 1
-; RV64ZBB-NEXT:    addiw a2, a2, -241
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 3
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 5
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.bswap.i16(i16 %a)
-  %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp)
-  ret i16 %tmp2
-}
-
-define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_bswap_bitreverse_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_bitreverse_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_bitreverse_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    lui a2, 61681
-; RV32ZBB-NEXT:    addi a2, a2, -241
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 209715
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 349525
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_bitreverse_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    srli a1, a0, 4
-; RV64ZBB-NEXT:    lui a2, 61681
-; RV64ZBB-NEXT:    addiw a2, a2, -241
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 209715
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 349525
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.bswap.i32(i32 %a)
-  %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp)
-  ret i32 %tmp2
-}
-
-define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_bswap_bitreverse_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a5, 349525
-; RV32I-NEXT:    addi a5, a5, 1365
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a1, 4
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 1
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bswap_bitreverse_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI9_0)(a1)
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI9_1)(a1)
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI9_2)(a1)
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bswap_bitreverse_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a2, a0, 4
-; RV32ZBB-NEXT:    lui a3, 61681
-; RV32ZBB-NEXT:    addi a3, a3, -241
-; RV32ZBB-NEXT:    and a2, a2, a3
-; RV32ZBB-NEXT:    and a0, a0, a3
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a0, 2
-; RV32ZBB-NEXT:    lui a4, 209715
-; RV32ZBB-NEXT:    addi a4, a4, 819
-; RV32ZBB-NEXT:    and a2, a2, a4
-; RV32ZBB-NEXT:    and a0, a0, a4
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a0, 1
-; RV32ZBB-NEXT:    lui a5, 349525
-; RV32ZBB-NEXT:    addi a5, a5, 1365
-; RV32ZBB-NEXT:    and a2, a2, a5
-; RV32ZBB-NEXT:    and a0, a0, a5
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a1, 4
-; RV32ZBB-NEXT:    and a2, a2, a3
-; RV32ZBB-NEXT:    and a1, a1, a3
-; RV32ZBB-NEXT:    slli a1, a1, 4
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 2
-; RV32ZBB-NEXT:    and a2, a2, a4
-; RV32ZBB-NEXT:    and a1, a1, a4
-; RV32ZBB-NEXT:    slli a1, a1, 2
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 1
-; RV32ZBB-NEXT:    and a2, a2, a5
-; RV32ZBB-NEXT:    and a1, a1, a5
-; RV32ZBB-NEXT:    slli a1, a1, 1
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bswap_bitreverse_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI9_0)(a1)
-; RV64ZBB-NEXT:    srli a2, a0, 4
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI9_1)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI9_1)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 2
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI9_2)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI9_2)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 1
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.bswap.i64(i64 %a)
-  %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp)
-  ret i64 %tmp2
-}
-
-define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_bswap_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 1
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 3
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_bswap_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 1
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 3
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_bswap_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    lui a2, 1
-; RV32ZBB-NEXT:    addi a2, a2, -241
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 3
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 5
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_bswap_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    srli a1, a0, 4
-; RV64ZBB-NEXT:    lui a2, 1
-; RV64ZBB-NEXT:    addiw a2, a2, -241
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 3
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 5
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
-  %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp)
-  ret i16 %tmp2
-}
-
-define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_bswap_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    lui a2, 61681
-; RV32I-NEXT:    addi a2, a2, -241
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    lui a2, 209715
-; RV32I-NEXT:    addi a2, a2, 819
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_bswap_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    lui a2, 61681
-; RV64I-NEXT:    addiw a2, a2, -241
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 4
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    lui a2, 209715
-; RV64I-NEXT:    addiw a2, a2, 819
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    slliw a0, a0, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_bswap_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a1, a0, 4
-; RV32ZBB-NEXT:    lui a2, 61681
-; RV32ZBB-NEXT:    addi a2, a2, -241
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 2
-; RV32ZBB-NEXT:    lui a2, 209715
-; RV32ZBB-NEXT:    addi a2, a2, 819
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    srli a1, a0, 1
-; RV32ZBB-NEXT:    lui a2, 349525
-; RV32ZBB-NEXT:    addi a2, a2, 1365
-; RV32ZBB-NEXT:    and a1, a1, a2
-; RV32ZBB-NEXT:    and a0, a0, a2
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a1, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_bswap_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    srli a1, a0, 4
-; RV64ZBB-NEXT:    lui a2, 61681
-; RV64ZBB-NEXT:    addiw a2, a2, -241
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 2
-; RV64ZBB-NEXT:    lui a2, 209715
-; RV64ZBB-NEXT:    addiw a2, a2, 819
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    srli a1, a0, 1
-; RV64ZBB-NEXT:    lui a2, 349525
-; RV64ZBB-NEXT:    addiw a2, a2, 1365
-; RV64ZBB-NEXT:    and a1, a1, a2
-; RV64ZBB-NEXT:    and a0, a0, a2
-; RV64ZBB-NEXT:    slliw a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a1, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
-  %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp)
-  ret i32 %tmp2
-}
-
-define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_bitreverse_bswap_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a2, a0, 4
-; RV32I-NEXT:    lui a3, 61681
-; RV32I-NEXT:    addi a3, a3, -241
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    slli a0, a0, 4
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 2
-; RV32I-NEXT:    lui a4, 209715
-; RV32I-NEXT:    addi a4, a4, 819
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a0, a0, a4
-; RV32I-NEXT:    slli a0, a0, 2
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a0, 1
-; RV32I-NEXT:    lui a5, 349525
-; RV32I-NEXT:    addi a5, a5, 1365
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a0, a0, a5
-; RV32I-NEXT:    slli a0, a0, 1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    srli a2, a1, 4
-; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    slli a1, a1, 4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 2
-; RV32I-NEXT:    and a2, a2, a4
-; RV32I-NEXT:    and a1, a1, a4
-; RV32I-NEXT:    slli a1, a1, 2
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    srli a2, a1, 1
-; RV32I-NEXT:    and a2, a2, a5
-; RV32I-NEXT:    and a1, a1, a5
-; RV32I-NEXT:    slli a1, a1, 1
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_bitreverse_bswap_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, %hi(.LCPI12_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI12_0)(a1)
-; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI12_1)
-; RV64I-NEXT:    ld a1, %lo(.LCPI12_1)(a1)
-; RV64I-NEXT:    slli a0, a0, 4
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 2
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI12_2)
-; RV64I-NEXT:    ld a1, %lo(.LCPI12_2)(a1)
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    srli a2, a0, 1
-; RV64I-NEXT:    and a2, a2, a1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    or a0, a2, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_bitreverse_bswap_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    srli a2, a0, 4
-; RV32ZBB-NEXT:    lui a3, 61681
-; RV32ZBB-NEXT:    addi a3, a3, -241
-; RV32ZBB-NEXT:    and a2, a2, a3
-; RV32ZBB-NEXT:    and a0, a0, a3
-; RV32ZBB-NEXT:    slli a0, a0, 4
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a0, 2
-; RV32ZBB-NEXT:    lui a4, 209715
-; RV32ZBB-NEXT:    addi a4, a4, 819
-; RV32ZBB-NEXT:    and a2, a2, a4
-; RV32ZBB-NEXT:    and a0, a0, a4
-; RV32ZBB-NEXT:    slli a0, a0, 2
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a0, 1
-; RV32ZBB-NEXT:    lui a5, 349525
-; RV32ZBB-NEXT:    addi a5, a5, 1365
-; RV32ZBB-NEXT:    and a2, a2, a5
-; RV32ZBB-NEXT:    and a0, a0, a5
-; RV32ZBB-NEXT:    slli a0, a0, 1
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    srli a2, a1, 4
-; RV32ZBB-NEXT:    and a2, a2, a3
-; RV32ZBB-NEXT:    and a1, a1, a3
-; RV32ZBB-NEXT:    slli a1, a1, 4
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 2
-; RV32ZBB-NEXT:    and a2, a2, a4
-; RV32ZBB-NEXT:    and a1, a1, a4
-; RV32ZBB-NEXT:    slli a1, a1, 2
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    srli a2, a1, 1
-; RV32ZBB-NEXT:    and a2, a2, a5
-; RV32ZBB-NEXT:    and a1, a1, a5
-; RV32ZBB-NEXT:    slli a1, a1, 1
-; RV32ZBB-NEXT:    or a1, a2, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_bitreverse_bswap_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI12_0)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI12_0)(a1)
-; RV64ZBB-NEXT:    srli a2, a0, 4
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI12_1)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI12_1)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 4
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 2
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    lui a1, %hi(.LCPI12_2)
-; RV64ZBB-NEXT:    ld a1, %lo(.LCPI12_2)(a1)
-; RV64ZBB-NEXT:    slli a0, a0, 2
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    srli a2, a0, 1
-; RV64ZBB-NEXT:    and a2, a2, a1
-; RV64ZBB-NEXT:    and a0, a0, a1
-; RV64ZBB-NEXT:    slli a0, a0, 1
-; RV64ZBB-NEXT:    or a0, a2, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
-  %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp)
-  ret i64 %tmp2
-}
-
-define i8 @test_cttz_i8(i8 %a) nounwind {
-; RV32I-LABEL: test_cttz_i8:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a1, a0, 255
-; RV32I-NEXT:    beqz a1, .LBB13_2
-; RV32I-NEXT:  # %bb.1: # %cond.false
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    andi a1, a1, 85
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    andi a1, a0, 51
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    andi a0, a0, 51
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    andi a0, a0, 15
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB13_2:
-; RV32I-NEXT:    li a0, 8
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i8:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    andi a1, a0, 255
-; RV64I-NEXT:    beqz a1, .LBB13_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    andi a1, a1, 85
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    andi a1, a0, 51
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    andi a0, a0, 51
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB13_2:
-; RV64I-NEXT:    li a0, 8
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i8:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ori a0, a0, 256
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i8:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ori a0, a0, 256
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
-  ret i8 %tmp
-}
-
-define i16 @test_cttz_i16(i16 %a) nounwind {
-; RV32I-LABEL: test_cttz_i16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a1, a0, 16
-; RV32I-NEXT:    srli a1, a1, 16
-; RV32I-NEXT:    beqz a1, .LBB14_2
-; RV32I-NEXT:  # %bb.1: # %cond.false
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 3
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    slli a0, a0, 19
-; RV32I-NEXT:    srli a0, a0, 27
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB14_2:
-; RV32I-NEXT:    li a0, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 48
-; RV64I-NEXT:    srli a1, a1, 48
-; RV64I-NEXT:    beqz a1, .LBB14_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 3
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    addw a0, a1, a0
-; RV64I-NEXT:    slli a0, a0, 51
-; RV64I-NEXT:    srli a0, a0, 59
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB14_2:
-; RV64I-NEXT:    li a0, 16
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i16:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lui a1, 16
-; RV32ZBB-NEXT:    or a0, a0, a1
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i16:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    lui a1, 16
-; RV64ZBB-NEXT:    or a0, a0, a1
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
-  ret i16 %tmp
-}
-
-define i32 @test_cttz_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_cttz_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB15_2
-; RV32I-NEXT:  # %bb.1: # %cond.false
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB15_2:
-; RV32I-NEXT:    li a0, 32
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB15_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    subw a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB15_2:
-; RV64I-NEXT:    li a0, 32
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctzw a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-  ret i32 %tmp
-}
-
-define i32 @test_ctlz_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_ctlz_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a0, .LBB16_2
-; RV32I-NEXT:  # %bb.1: # %cond.false
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB16_2:
-; RV32I-NEXT:    li a0, 32
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_ctlz_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    beqz a1, .LBB16_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    srliw a1, a0, 1
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srliw a1, a0, 2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srliw a1, a0, 4
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srliw a1, a0, 8
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    srliw a1, a0, 16
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    subw a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB16_2:
-; RV64I-NEXT:    li a0, 32
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_ctlz_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    clz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_ctlz_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    clzw a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
-  ret i32 %tmp
-}
-
-define i64 @test_cttz_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_cttz_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    not a1, s2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s4, a2, 1365
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s5, a1, 819
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s6, a1, -241
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s3, a1, 257
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a0, s1, -1
-; RV32I-NEXT:    not a1, s1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s2, .LBB17_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB17_3
-; RV32I-NEXT:  .LBB17_2:
-; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB17_3:
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 32
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a0, .LBB17_2
-; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI17_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI17_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI17_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI17_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI17_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI17_3)(a1)
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB17_2:
-; RV64I-NEXT:    li a0, 64
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    bnez a0, .LBB17_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    ctz a0, a1
-; RV32ZBB-NEXT:    addi a0, a0, 32
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-; RV32ZBB-NEXT:  .LBB17_2:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
-  ret i64 %tmp
-}
-
-define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
-; RV32I-LABEL: test_cttz_i8_zero_undef:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    andi a1, a1, 85
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    andi a1, a0, 51
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    andi a0, a0, 51
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    andi a0, a0, 15
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i8_zero_undef:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    andi a1, a1, 85
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    andi a1, a0, 51
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    andi a0, a0, 51
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i8_zero_undef:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i8_zero_undef:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
-  ret i8 %tmp
-}
-
-define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
-; RV32I-LABEL: test_cttz_i16_zero_undef:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 5
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 3
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 1
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    slli a1, a0, 8
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    slli a0, a0, 19
-; RV32I-NEXT:    srli a0, a0, 27
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i16_zero_undef:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 5
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    lui a1, 3
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 1
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    slliw a1, a0, 8
-; RV64I-NEXT:    addw a0, a1, a0
-; RV64I-NEXT:    slli a0, a0, 51
-; RV64I-NEXT:    srli a0, a0, 59
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i16_zero_undef:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i16_zero_undef:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
-  ret i16 %tmp
-}
-
-define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
-; RV32I-LABEL: test_cttz_i32_zero_undef:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, a0, -1
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i32_zero_undef:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addiw a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    subw a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i32_zero_undef:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i32_zero_undef:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctzw a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
-  ret i32 %tmp
-}
-
-define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
-; RV32I-LABEL: test_cttz_i64_zero_undef:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv s2, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    not a1, s2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s4, a2, 1365
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s5, a1, 819
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s6, a1, -241
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s3, a1, 257
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a0, s1, -1
-; RV32I-NEXT:    not a1, s1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    and a1, a1, s4
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    and a1, a0, s5
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s5
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s6
-; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    bnez s2, .LBB21_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    addi a0, a0, 32
-; RV32I-NEXT:    j .LBB21_3
-; RV32I-NEXT:  .LBB21_2:
-; RV32I-NEXT:    srli a0, s0, 24
-; RV32I-NEXT:  .LBB21_3:
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 32
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_cttz_i64_zero_undef:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, a0, -1
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI21_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI21_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI21_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI21_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI21_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI21_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI21_3)(a1)
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_cttz_i64_zero_undef:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    bnez a0, .LBB21_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    ctz a0, a1
-; RV32ZBB-NEXT:    addi a0, a0, 32
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-; RV32ZBB-NEXT:  .LBB21_2:
-; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_cttz_i64_zero_undef:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    ctz a0, a0
-; RV64ZBB-NEXT:    ret
-  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
-  ret i64 %tmp
-}
-
-define i32 @test_ctpop_i32(i32 %a) nounwind {
-; RV32I-LABEL: test_ctpop_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi a2, a2, 1365
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    and a2, a0, a1
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    add a0, a2, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi a1, a1, -241
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi a1, a1, 257
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_ctpop_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    lui a2, 349525
-; RV64I-NEXT:    addiw a2, a2, 1365
-; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    subw a0, a0, a1
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    and a2, a0, a1
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    add a0, a2, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    lui a1, 61681
-; RV64I-NEXT:    addiw a1, a1, -241
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 4112
-; RV64I-NEXT:    addiw a1, a1, 257
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srliw a0, a0, 24
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_ctpop_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    cpop a0, a0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_ctpop_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    cpopw a0, a0
-; RV64ZBB-NEXT:    ret
-  %1 = call i32 @llvm.ctpop.i32(i32 %a)
-  ret i32 %1
-}
-
-define i64 @test_ctpop_i64(i64 %a) nounwind {
-; RV32I-LABEL: test_ctpop_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -32
-; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    srli a0, a1, 1
-; RV32I-NEXT:    lui a2, 349525
-; RV32I-NEXT:    addi s2, a2, 1365
-; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, a1, a0
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi s3, a1, 819
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    lui a1, 61681
-; RV32I-NEXT:    addi s4, a1, -241
-; RV32I-NEXT:    and a0, a0, s4
-; RV32I-NEXT:    lui a1, 4112
-; RV32I-NEXT:    addi s1, a1, 257
-; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli s5, a0, 24
-; RV32I-NEXT:    srli a0, s0, 1
-; RV32I-NEXT:    and a0, a0, s2
-; RV32I-NEXT:    sub a0, s0, a0
-; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    srli a0, a0, 2
-; RV32I-NEXT:    and a0, a0, s3
-; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    and a0, a0, s4
-; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    call __mulsi3@plt
-; RV32I-NEXT:    srli a0, a0, 24
-; RV32I-NEXT:    add a0, a0, s5
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 32
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_ctpop_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI23_0)(a1)
-; RV64I-NEXT:    lui a2, %hi(.LCPI23_1)
-; RV64I-NEXT:    ld a2, %lo(.LCPI23_1)(a2)
-; RV64I-NEXT:    srli a3, a0, 1
-; RV64I-NEXT:    and a1, a3, a1
-; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    and a1, a0, a2
-; RV64I-NEXT:    srli a0, a0, 2
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a2, %hi(.LCPI23_2)
-; RV64I-NEXT:    ld a2, %lo(.LCPI23_2)(a2)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    lui a1, %hi(.LCPI23_3)
-; RV64I-NEXT:    ld a1, %lo(.LCPI23_3)(a1)
-; RV64I-NEXT:    call __muldi3@plt
-; RV64I-NEXT:    srli a0, a0, 56
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_ctpop_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    cpop a1, a1
-; RV32ZBB-NEXT:    cpop a0, a0
-; RV32ZBB-NEXT:    add a0, a0, a1
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_ctpop_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    cpop a0, a0
-; RV64ZBB-NEXT:    ret
-  %1 = call i64 @llvm.ctpop.i64(i64 %a)
-  ret i64 %1
-}
-
-define i32 @test_parity_i32(i32 %a) {
-; RV32I-LABEL: test_parity_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_parity_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    srliw a0, a0, 16
-; RV64I-NEXT:    xor a0, a1, a0
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_parity_i32:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    cpop a0, a0
-; RV32ZBB-NEXT:    andi a0, a0, 1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_parity_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    cpopw a0, a0
-; RV64ZBB-NEXT:    andi a0, a0, 1
-; RV64ZBB-NEXT:    ret
-  %1 = call i32 @llvm.ctpop.i32(i32 %a)
-  %2 = and i32 %1, 1
-  ret i32 %2
-}
-
-define i64 @test_parity_i64(i64 %a) {
-; RV32I-LABEL: test_parity_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 16
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 8
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 4
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 2
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    srli a1, a0, 1
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: test_parity_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 16
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 8
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 4
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 2
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    srli a1, a0, 1
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: test_parity_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    xor a0, a0, a1
-; RV32ZBB-NEXT:    cpop a0, a0
-; RV32ZBB-NEXT:    andi a0, a0, 1
-; RV32ZBB-NEXT:    li a1, 0
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: test_parity_i64:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    cpop a0, a0
-; RV64ZBB-NEXT:    andi a0, a0, 1
-; RV64ZBB-NEXT:    ret
-  %1 = call i64 @llvm.ctpop.i64(i64 %a)
-  %2 = and i64 %1, 1
-  ret i64 %2
-}

diff  --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
new file mode 100644
index 000000000000..c6c98485cfc7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
@@ -0,0 +1,1379 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZB
+; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZB
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZB
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZB
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
+
+define i16 @test_bswap_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bswap_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_i16:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a0, a0
+; RV32ZB-NEXT:    srli a0, a0, 16
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_i16:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    srli a0, a0, 48
+; RV64ZB-NEXT:    ret
+  %tmp = call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bswap_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bswap_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a1, a0, 8
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srliw a2, a0, 24
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    slliw a0, a0, 24
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_i32:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a0, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_i32:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    srli a0, a0, 32
+; RV64ZB-NEXT:    ret
+  %tmp = call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bswap_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bswap_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a1, 8
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -256
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a4, a1, 24
+; RV32I-NEXT:    or a2, a2, a4
+; RV32I-NEXT:    slli a4, a1, 8
+; RV32I-NEXT:    lui a5, 4080
+; RV32I-NEXT:    and a4, a4, a5
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    or a2, a1, a2
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a1, a0, a1
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 24
+; RV64I-NEXT:    lui a2, 4080
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    li a3, 255
+; RV64I-NEXT:    slli a4, a3, 24
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    srli a2, a0, 40
+; RV64I-NEXT:    lui a4, 16
+; RV64I-NEXT:    addiw a4, a4, -256
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srli a4, a0, 56
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    slli a4, a3, 40
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srliw a4, a0, 24
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_i64:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a2, a1
+; RV32ZB-NEXT:    rev8 a1, a0
+; RV32ZB-NEXT:    mv a0, a2
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_i64:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %tmp
+}
+
+define i8 @test_bitreverse_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a1, a0, 15
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srli a0, a0, 28
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 51
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 51
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 85
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    andi a0, a0, 85
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a1, a0, 15
+; RV64I-NEXT:    slli a1, a1, 4
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srli a0, a0, 60
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 51
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 51
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 85
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    andi a0, a0, 85
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_i8:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    andi a1, a0, 15
+; RV32ZB-NEXT:    slli a1, a1, 4
+; RV32ZB-NEXT:    slli a0, a0, 24
+; RV32ZB-NEXT:    srli a0, a0, 28
+; RV32ZB-NEXT:    or a0, a0, a1
+; RV32ZB-NEXT:    andi a1, a0, 51
+; RV32ZB-NEXT:    slli a1, a1, 2
+; RV32ZB-NEXT:    srli a0, a0, 2
+; RV32ZB-NEXT:    andi a0, a0, 51
+; RV32ZB-NEXT:    or a0, a0, a1
+; RV32ZB-NEXT:    andi a1, a0, 85
+; RV32ZB-NEXT:    slli a1, a1, 1
+; RV32ZB-NEXT:    srli a0, a0, 1
+; RV32ZB-NEXT:    andi a0, a0, 85
+; RV32ZB-NEXT:    or a0, a0, a1
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_i8:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    andi a1, a0, 15
+; RV64ZB-NEXT:    slli a1, a1, 4
+; RV64ZB-NEXT:    slli a0, a0, 56
+; RV64ZB-NEXT:    srli a0, a0, 60
+; RV64ZB-NEXT:    or a0, a0, a1
+; RV64ZB-NEXT:    andi a1, a0, 51
+; RV64ZB-NEXT:    slli a1, a1, 2
+; RV64ZB-NEXT:    srli a0, a0, 2
+; RV64ZB-NEXT:    andi a0, a0, 51
+; RV64ZB-NEXT:    or a0, a0, a1
+; RV64ZB-NEXT:    andi a1, a0, 85
+; RV64ZB-NEXT:    slli a1, a1, 1
+; RV64ZB-NEXT:    srli a0, a0, 1
+; RV64ZB-NEXT:    andi a0, a0, 85
+; RV64ZB-NEXT:    or a0, a0, a1
+; RV64ZB-NEXT:    ret
+  %tmp = call i8 @llvm.bitreverse.i8(i8 %a)
+  ret i8 %tmp
+}
+
+define i16 @test_bitreverse_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 3
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 3
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_i16:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a0, a0
+; RV32ZB-NEXT:    srli a1, a0, 12
+; RV32ZB-NEXT:    lui a2, 15
+; RV32ZB-NEXT:    addi a2, a2, 240
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    srli a0, a0, 20
+; RV32ZB-NEXT:    andi a0, a0, -241
+; RV32ZB-NEXT:    or a0, a0, a1
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 3
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 5
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_i16:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    srli a1, a0, 44
+; RV64ZB-NEXT:    lui a2, 15
+; RV64ZB-NEXT:    addiw a2, a2, 240
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    srli a0, a0, 52
+; RV64ZB-NEXT:    andi a0, a0, -241
+; RV64ZB-NEXT:    or a0, a0, a1
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 3
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 5
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bitreverse_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 61681
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a1, a0, 8
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srliw a2, a0, 24
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    slliw a0, a0, 24
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_i32:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a0, a0
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    lui a2, 61681
+; RV32ZB-NEXT:    addi a2, a2, -241
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 209715
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 349525
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_i32:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    srli a1, a0, 36
+; RV64ZB-NEXT:    lui a2, 61681
+; RV64ZB-NEXT:    addiw a2, a2, -241
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    srli a0, a0, 28
+; RV64ZB-NEXT:    lui a2, 986895
+; RV64ZB-NEXT:    addiw a2, a2, 240
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    sext.w a0, a0
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 209715
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 349525
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bitreverse_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a1, 8
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -256
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    srli a4, a1, 24
+; RV32I-NEXT:    or a2, a2, a4
+; RV32I-NEXT:    slli a4, a1, 8
+; RV32I-NEXT:    lui a5, 4080
+; RV32I-NEXT:    and a4, a4, a5
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    or a1, a1, a4
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    lui a4, 61681
+; RV32I-NEXT:    addi a4, a4, -241
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 2
+; RV32I-NEXT:    lui a6, 209715
+; RV32I-NEXT:    addi a6, a6, 819
+; RV32I-NEXT:    and a2, a2, a6
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    lui a7, 349525
+; RV32I-NEXT:    addi a7, a7, 1365
+; RV32I-NEXT:    and a2, a2, a7
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a2, a2, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    and a3, a3, a5
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    and a0, a0, a6
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    and a0, a0, a7
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 24
+; RV64I-NEXT:    lui a2, 4080
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    li a3, 255
+; RV64I-NEXT:    slli a4, a3, 24
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    srli a2, a0, 40
+; RV64I-NEXT:    lui a4, 16
+; RV64I-NEXT:    addiw a4, a4, -256
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srli a4, a0, 56
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    slli a4, a3, 40
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srliw a4, a0, 24
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a3
+; RV64I-NEXT:    lui a3, %hi(.LCPI6_0)
+; RV64I-NEXT:    ld a3, %lo(.LCPI6_0)(a3)
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    lui a2, %hi(.LCPI6_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI6_1)(a2)
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a2, %hi(.LCPI6_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI6_2)(a2)
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_i64:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    rev8 a1, a1
+; RV32ZB-NEXT:    srli a2, a1, 4
+; RV32ZB-NEXT:    lui a3, 61681
+; RV32ZB-NEXT:    addi a3, a3, -241
+; RV32ZB-NEXT:    and a2, a2, a3
+; RV32ZB-NEXT:    and a1, a1, a3
+; RV32ZB-NEXT:    slli a1, a1, 4
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 2
+; RV32ZB-NEXT:    lui a4, 209715
+; RV32ZB-NEXT:    addi a4, a4, 819
+; RV32ZB-NEXT:    and a2, a2, a4
+; RV32ZB-NEXT:    and a1, a1, a4
+; RV32ZB-NEXT:    slli a1, a1, 2
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 1
+; RV32ZB-NEXT:    lui a5, 349525
+; RV32ZB-NEXT:    addi a5, a5, 1365
+; RV32ZB-NEXT:    and a2, a2, a5
+; RV32ZB-NEXT:    and a1, a1, a5
+; RV32ZB-NEXT:    slli a1, a1, 1
+; RV32ZB-NEXT:    or a2, a2, a1
+; RV32ZB-NEXT:    rev8 a0, a0
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    and a1, a1, a3
+; RV32ZB-NEXT:    and a0, a0, a3
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    and a1, a1, a4
+; RV32ZB-NEXT:    and a0, a0, a4
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    and a1, a1, a5
+; RV32ZB-NEXT:    and a0, a0, a5
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a1, a1, a0
+; RV32ZB-NEXT:    mv a0, a2
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_i64:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI6_0)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI6_0)(a1)
+; RV64ZB-NEXT:    rev8 a0, a0
+; RV64ZB-NEXT:    srli a2, a0, 4
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI6_1)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI6_1)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 4
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 2
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI6_2)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI6_2)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 1
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
+  ret i64 %tmp
+}
+
+define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bswap_bitreverse_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 3
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_bitreverse_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 3
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_bitreverse_i16:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    lui a2, 1
+; RV32ZB-NEXT:    addi a2, a2, -241
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 3
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 5
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_bitreverse_i16:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    srli a1, a0, 4
+; RV64ZB-NEXT:    lui a2, 1
+; RV64ZB-NEXT:    addiw a2, a2, -241
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 4
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 3
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 5
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i16 @llvm.bswap.i16(i16 %a)
+  %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp)
+  ret i16 %tmp2
+}
+
+define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bswap_bitreverse_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 61681
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_bitreverse_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_bitreverse_i32:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    lui a2, 61681
+; RV32ZB-NEXT:    addi a2, a2, -241
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 209715
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 349525
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_bitreverse_i32:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    srli a1, a0, 4
+; RV64ZB-NEXT:    lui a2, 61681
+; RV64ZB-NEXT:    addiw a2, a2, -241
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 4
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 209715
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 349525
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i32 @llvm.bswap.i32(i32 %a)
+  %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp)
+  ret i32 %tmp2
+}
+
+define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bswap_bitreverse_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a4, 209715
+; RV32I-NEXT:    addi a4, a4, 819
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    lui a5, 349525
+; RV32I-NEXT:    addi a5, a5, 1365
+; RV32I-NEXT:    and a2, a2, a5
+; RV32I-NEXT:    and a0, a0, a5
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 2
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    and a2, a2, a5
+; RV32I-NEXT:    and a1, a1, a5
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bswap_bitreverse_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI9_0)(a1)
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI9_1)
+; RV64I-NEXT:    ld a1, %lo(.LCPI9_1)(a1)
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI9_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI9_2)(a1)
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bswap_bitreverse_i64:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a2, a0, 4
+; RV32ZB-NEXT:    lui a3, 61681
+; RV32ZB-NEXT:    addi a3, a3, -241
+; RV32ZB-NEXT:    and a2, a2, a3
+; RV32ZB-NEXT:    and a0, a0, a3
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a0, 2
+; RV32ZB-NEXT:    lui a4, 209715
+; RV32ZB-NEXT:    addi a4, a4, 819
+; RV32ZB-NEXT:    and a2, a2, a4
+; RV32ZB-NEXT:    and a0, a0, a4
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a0, 1
+; RV32ZB-NEXT:    lui a5, 349525
+; RV32ZB-NEXT:    addi a5, a5, 1365
+; RV32ZB-NEXT:    and a2, a2, a5
+; RV32ZB-NEXT:    and a0, a0, a5
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a1, 4
+; RV32ZB-NEXT:    and a2, a2, a3
+; RV32ZB-NEXT:    and a1, a1, a3
+; RV32ZB-NEXT:    slli a1, a1, 4
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 2
+; RV32ZB-NEXT:    and a2, a2, a4
+; RV32ZB-NEXT:    and a1, a1, a4
+; RV32ZB-NEXT:    slli a1, a1, 2
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 1
+; RV32ZB-NEXT:    and a2, a2, a5
+; RV32ZB-NEXT:    and a1, a1, a5
+; RV32ZB-NEXT:    slli a1, a1, 1
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bswap_bitreverse_i64:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI9_0)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI9_0)(a1)
+; RV64ZB-NEXT:    srli a2, a0, 4
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI9_1)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI9_1)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 4
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 2
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI9_2)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI9_2)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 1
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i64 @llvm.bswap.i64(i64 %a)
+  %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp)
+  ret i64 %tmp2
+}
+
+define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_bswap_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 3
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_bswap_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 3
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_bswap_i16:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    lui a2, 1
+; RV32ZB-NEXT:    addi a2, a2, -241
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 3
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 5
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_bswap_i16:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    srli a1, a0, 4
+; RV64ZB-NEXT:    lui a2, 1
+; RV64ZB-NEXT:    addiw a2, a2, -241
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 4
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 3
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 5
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i16 @llvm.bitreverse.i16(i16 %a)
+  %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp)
+  ret i16 %tmp2
+}
+
+define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_bswap_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 61681
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_bswap_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_bswap_i32:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a1, a0, 4
+; RV32ZB-NEXT:    lui a2, 61681
+; RV32ZB-NEXT:    addi a2, a2, -241
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 2
+; RV32ZB-NEXT:    lui a2, 209715
+; RV32ZB-NEXT:    addi a2, a2, 819
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    srli a1, a0, 1
+; RV32ZB-NEXT:    lui a2, 349525
+; RV32ZB-NEXT:    addi a2, a2, 1365
+; RV32ZB-NEXT:    and a1, a1, a2
+; RV32ZB-NEXT:    and a0, a0, a2
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a1, a0
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_bswap_i32:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    srli a1, a0, 4
+; RV64ZB-NEXT:    lui a2, 61681
+; RV64ZB-NEXT:    addiw a2, a2, -241
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 4
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 2
+; RV64ZB-NEXT:    lui a2, 209715
+; RV64ZB-NEXT:    addiw a2, a2, 819
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 2
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    srli a1, a0, 1
+; RV64ZB-NEXT:    lui a2, 349525
+; RV64ZB-NEXT:    addiw a2, a2, 1365
+; RV64ZB-NEXT:    and a1, a1, a2
+; RV64ZB-NEXT:    and a0, a0, a2
+; RV64ZB-NEXT:    slliw a0, a0, 1
+; RV64ZB-NEXT:    or a0, a1, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i32 @llvm.bitreverse.i32(i32 %a)
+  %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp)
+  ret i32 %tmp2
+}
+
+define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_bswap_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a2, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a3, a3, -241
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a0, 2
+; RV32I-NEXT:    lui a4, 209715
+; RV32I-NEXT:    addi a4, a4, 819
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a0, 1
+; RV32I-NEXT:    lui a5, 349525
+; RV32I-NEXT:    addi a5, a5, 1365
+; RV32I-NEXT:    and a2, a2, a5
+; RV32I-NEXT:    and a0, a0, a5
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    srli a2, a1, 4
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 2
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    srli a2, a1, 1
+; RV32I-NEXT:    and a2, a2, a5
+; RV32I-NEXT:    and a1, a1, a5
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_bswap_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, %hi(.LCPI12_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI12_0)(a1)
+; RV64I-NEXT:    srli a2, a0, 4
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI12_1)
+; RV64I-NEXT:    ld a1, %lo(.LCPI12_1)(a1)
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srli a2, a0, 2
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI12_2)
+; RV64I-NEXT:    ld a1, %lo(.LCPI12_2)(a1)
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    srli a2, a0, 1
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZB-LABEL: test_bitreverse_bswap_i64:
+; RV32ZB:       # %bb.0:
+; RV32ZB-NEXT:    srli a2, a0, 4
+; RV32ZB-NEXT:    lui a3, 61681
+; RV32ZB-NEXT:    addi a3, a3, -241
+; RV32ZB-NEXT:    and a2, a2, a3
+; RV32ZB-NEXT:    and a0, a0, a3
+; RV32ZB-NEXT:    slli a0, a0, 4
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a0, 2
+; RV32ZB-NEXT:    lui a4, 209715
+; RV32ZB-NEXT:    addi a4, a4, 819
+; RV32ZB-NEXT:    and a2, a2, a4
+; RV32ZB-NEXT:    and a0, a0, a4
+; RV32ZB-NEXT:    slli a0, a0, 2
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a0, 1
+; RV32ZB-NEXT:    lui a5, 349525
+; RV32ZB-NEXT:    addi a5, a5, 1365
+; RV32ZB-NEXT:    and a2, a2, a5
+; RV32ZB-NEXT:    and a0, a0, a5
+; RV32ZB-NEXT:    slli a0, a0, 1
+; RV32ZB-NEXT:    or a0, a2, a0
+; RV32ZB-NEXT:    srli a2, a1, 4
+; RV32ZB-NEXT:    and a2, a2, a3
+; RV32ZB-NEXT:    and a1, a1, a3
+; RV32ZB-NEXT:    slli a1, a1, 4
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 2
+; RV32ZB-NEXT:    and a2, a2, a4
+; RV32ZB-NEXT:    and a1, a1, a4
+; RV32ZB-NEXT:    slli a1, a1, 2
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    srli a2, a1, 1
+; RV32ZB-NEXT:    and a2, a2, a5
+; RV32ZB-NEXT:    and a1, a1, a5
+; RV32ZB-NEXT:    slli a1, a1, 1
+; RV32ZB-NEXT:    or a1, a2, a1
+; RV32ZB-NEXT:    ret
+;
+; RV64ZB-LABEL: test_bitreverse_bswap_i64:
+; RV64ZB:       # %bb.0:
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI12_0)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI12_0)(a1)
+; RV64ZB-NEXT:    srli a2, a0, 4
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI12_1)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI12_1)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 4
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 2
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    lui a1, %hi(.LCPI12_2)
+; RV64ZB-NEXT:    ld a1, %lo(.LCPI12_2)(a1)
+; RV64ZB-NEXT:    slli a0, a0, 2
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    srli a2, a0, 1
+; RV64ZB-NEXT:    and a2, a2, a1
+; RV64ZB-NEXT:    and a0, a0, a1
+; RV64ZB-NEXT:    slli a0, a0, 1
+; RV64ZB-NEXT:    or a0, a2, a0
+; RV64ZB-NEXT:    ret
+  %tmp = call i64 @llvm.bitreverse.i64(i64 %a)
+  %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp)
+  ret i64 %tmp2
+}
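
For reference, the composite tests above fold to a per-byte bit reversal: a full
bit reversal is a byte swap followed by reversing the bits within each byte, so
composing it with bswap in either order cancels the byte swap and leaves only the
three in-byte swap stages. The lui constants 61681, 209715 and 349525 are the
upper 20 bits of the masks 0x0f0f0f0f, 0x33333333 and 0x55555555; on RV64 the
corresponding 64-bit masks are loaded from the constant pool (.LCPI9_*,
.LCPI12_*). Per the commit message, the RV32ZB/RV64ZB check prefixes are shared
between the Zbb and the newly added Zbkb RUN lines. A rough C sketch of the
expansion these checks encode (illustrative names, not code from the patch):

    #include <stdint.h>

    /* Reverse the bits inside each byte; byte order is untouched. */
    static uint32_t brev8_32(uint32_t x) {
        x = ((x >> 4) & 0x0f0f0f0fu) | ((x & 0x0f0f0f0fu) << 4); /* nibbles */
        x = ((x >> 2) & 0x33333333u) | ((x & 0x33333333u) << 2); /* pairs   */
        x = ((x >> 1) & 0x55555555u) | ((x & 0x55555555u) << 1); /* bits    */
        return x;
    }
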

diff  --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
new file mode 100644
index 000000000000..89592c6ecbb2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -0,0 +1,1076 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBB
+
+declare i8 @llvm.cttz.i8(i8, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i32 @llvm.ctpop.i32(i32)
+declare i64 @llvm.ctpop.i64(i64)
+
+define i8 @test_cttz_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a1, a0, 255
+; RV32I-NEXT:    beqz a1, .LBB0_2
+; RV32I-NEXT:  # %bb.1: # %cond.false
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    andi a1, a1, 85
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 51
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 51
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    li a0, 8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a1, a0, 255
+; RV64I-NEXT:    beqz a1, .LBB0_2
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    andi a1, a1, 85
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 51
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 51
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    li a0, 8
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ori a0, a0, 256
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ori a0, a0, 256
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
+  ret i8 %tmp
+}
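
The I-only paths here first form ~a & (a - 1), which has ones exactly in the
trailing-zero positions of a, and then popcount that mask with the 8-bit
constants 85, 51 and 15 (0x55, 0x33, 0x0f). Roughly, in C (illustrative names;
the early return matches the beqz guard to .LBB0_2):

    #include <stdint.h>

    static unsigned cttz8(uint8_t a) {
        if (a == 0) return 8;                  /* the beqz fast path      */
        uint8_t x = (uint8_t)(~a & (a - 1u));  /* mask of trailing zeros  */
        x = x - ((x >> 1) & 0x55);             /* 2-bit partial sums      */
        x = (x & 0x33) + ((x >> 2) & 0x33);    /* 4-bit partial sums      */
        x = (x + (x >> 4)) & 0x0f;             /* total, 0..8             */
        return x;
    }

The Zbb path instead sets bit 8 up front (ori a0, a0, 256) so a single ctz
returns 8 for a zero byte with no branch.
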
+
+define i16 @test_cttz_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    beqz a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1: # %cond.false
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 3
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    slli a0, a0, 19
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    li a0, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    beqz a1, .LBB1_2
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 3
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slliw a1, a0, 8
+; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    slli a0, a0, 51
+; RV64I-NEXT:    srli a0, a0, 59
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    li a0, 16
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a1, 16
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, 16
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beqz a0, .LBB2_2
+; RV32I-NEXT:  # %bb.1: # %cond.false
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    li a0, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    beqz a1, .LBB2_2
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 61681
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, 4112
+; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctzw a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i32 @test_ctlz_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctlz_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beqz a0, .LBB3_2
+; RV32I-NEXT:  # %bb.1: # %cond.false
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 16
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    li a0, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_ctlz_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    beqz a1, .LBB3_2
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    srliw a1, a0, 1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srliw a1, a0, 2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srliw a1, a0, 4
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srliw a1, a0, 8
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srliw a1, a0, 16
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 61681
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, 4112
+; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    li a0, 32
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctlz_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    clz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctlz_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    clzw a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
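
The ctlz expansion smears the most significant set bit into every lower position
with the or/srli ladder (shifts by 1, 2, 4, 8, 16), then inverts and popcounts:
after the smear, ~x has exactly one set bit per leading zero. On the I-only RUN
lines (no M extension) the final 0x01010101 multiply of the popcount lowers to
the __mulsi3/__muldi3 libcall, which is why those paths spill ra. A C rendering
under those assumptions (illustrative names):

    #include <stdint.h>

    static unsigned popcount32(uint32_t x) {
        x = x - ((x >> 1) & 0x55555555u);
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
        x = (x + (x >> 4)) & 0x0f0f0f0fu;
        return (x * 0x01010101u) >> 24;   /* the multiply that becomes a libcall */
    }

    static unsigned clz32(uint32_t x) {
        if (x == 0) return 32;            /* the beqz fast path */
        x |= x >> 1;  x |= x >> 2;        /* smear the MSB down */
        x |= x >> 4;  x |= x >> 8;
        x |= x >> 16;
        return popcount32(~x);            /* one set bit per leading zero */
    }
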
+
+define i64 @test_cttz_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    not a1, s2
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi s4, a2, 1365
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi s5, a1, 819
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi s6, a1, -241
+; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi s3, a1, 257
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    addi a0, s1, -1
+; RV32I-NEXT:    not a1, s1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    bnez s2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    addi a0, a0, 32
+; RV32I-NEXT:    j .LBB4_3
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    srli a0, s0, 24
+; RV32I-NEXT:  .LBB4_3:
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beqz a0, .LBB4_2
+; RV64I-NEXT:  # %bb.1: # %cond.false
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI4_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI4_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI4_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI4_1)(a2)
+; RV64I-NEXT:    srli a3, a0, 1
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    and a1, a0, a2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a2, %hi(.LCPI4_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI4_2)(a2)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a1, %hi(.LCPI4_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI4_3)(a1)
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    li a0, 64
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bnez a0, .LBB4_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    ctz a0, a1
+; RV32ZBB-NEXT:    addi a0, a0, 32
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB4_2:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
+  ret i64 %tmp
+}
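
On RV32 the i64 cttz splits into two 32-bit trailing-zero counts: both halves
are popcounted eagerly (the masks are kept in s-registers across the two
__mulsi3 calls), and a single bnez on the low word selects the result. Because
the popcount-based ctz returns 32 for a zero word, the all-zero case falls out
as 32 + 32 with no extra guard. A sketch under those assumptions:

    #include <stdint.h>

    /* Returns 32 for x == 0, so the 64-bit split needs no zero check. */
    static unsigned ctz32(uint32_t x) {
        uint32_t m = ~x & (x - 1u);       /* mask of trailing zeros */
        m = m - ((m >> 1) & 0x55555555u);
        m = (m & 0x33333333u) + ((m >> 2) & 0x33333333u);
        m = (m + (m >> 4)) & 0x0f0f0f0fu;
        return (m * 0x01010101u) >> 24;
    }

    static unsigned ctz64(uint32_t lo, uint32_t hi) {
        return lo ? ctz32(lo) : 32u + ctz32(hi);
    }
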
+
+define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind {
+; RV32I-LABEL: test_cttz_i8_zero_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    andi a1, a1, 85
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 51
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 51
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i8_zero_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    andi a1, a1, 85
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 51
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 51
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i8_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i8_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
+  ret i8 %tmp
+}
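
With the is_zero_poison flag set to true, the beqz guards and the li 8/16/32/64
fallbacks disappear from the _zero_undef variants below, and the Zbb paths drop
the ori/lui bit-setting trick in favor of a bare ctz. In C terms the only change
is removing the early return (hypothetical name):

    #include <stdint.h>

    /* is_zero_poison == true: the caller guarantees a != 0 */
    static unsigned cttz8_undef(uint8_t a) {
        uint8_t x = (uint8_t)(~a & (a - 1u));
        x = x - ((x >> 1) & 0x55);
        x = (x & 0x33) + ((x >> 2) & 0x33);
        return (x + (x >> 4)) & 0x0f;
    }
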
+
+define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind {
+; RV32I-LABEL: test_cttz_i16_zero_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 5
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 3
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 1
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    slli a0, a0, 19
+; RV32I-NEXT:    srli a0, a0, 27
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i16_zero_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 5
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 3
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slliw a1, a0, 8
+; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    slli a0, a0, 51
+; RV64I-NEXT:    srli a0, a0, 59
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i16_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i16_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
+; RV32I-LABEL: test_cttz_i32_zero_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    addi a1, a0, -1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i32_zero_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addiw a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 61681
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, 4112
+; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i32_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i32_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctzw a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
+; RV32I-LABEL: test_cttz_i64_zero_undef:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s1, a1
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    not a1, s2
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi s4, a2, 1365
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi s5, a1, 819
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi s6, a1, -241
+; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi s3, a1, 257
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    addi a0, s1, -1
+; RV32I-NEXT:    not a1, s1
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, s4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    and a1, a0, s5
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s5
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    and a0, a0, s6
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    bnez s2, .LBB8_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    addi a0, a0, 32
+; RV32I-NEXT:    j .LBB8_3
+; RV32I-NEXT:  .LBB8_2:
+; RV32I-NEXT:    srli a0, s0, 24
+; RV32I-NEXT:  .LBB8_3:
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_cttz_i64_zero_undef:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    addi a1, a0, -1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI8_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI8_1)(a2)
+; RV64I-NEXT:    srli a3, a0, 1
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    and a1, a0, a2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI8_2)(a2)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a1, %hi(.LCPI8_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI8_3)(a1)
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_cttz_i64_zero_undef:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    bnez a0, .LBB8_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    ctz a0, a1
+; RV32ZBB-NEXT:    addi a0, a0, 32
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB8_2:
+; RV32ZBB-NEXT:    ctz a0, a0
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_cttz_i64_zero_undef:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ctz a0, a0
+; RV64ZBB-NEXT:    ret
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
+  ret i64 %tmp
+}
+
+define i32 @test_ctpop_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_ctpop_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi a2, a2, 1365
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    and a2, a0, a1
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi a1, a1, -241
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi a1, a1, 257
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_ctpop_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    and a2, a0, a1
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 61681
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a1, 4112
+; RV64I-NEXT:    addiw a1, a1, 257
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctpop_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctpop_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpopw a0, a0
+; RV64ZBB-NEXT:    ret
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  ret i32 %1
+}
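
This is the classic SWAR popcount: 2-bit partial sums, then 4-bit sums, then
byte sums, and finally a multiply by 0x01010101 that accumulates the four byte
counts into the top byte so one shift by 24 extracts the total. Without M that
multiply is the __mulsi3/__muldi3 libcall visible above, while Zbb collapses the
whole sequence to cpop/cpopw. For reference (illustrative name):

    #include <stdint.h>

    static unsigned popcount32(uint32_t x) {
        x = x - ((x >> 1) & 0x55555555u);                  /* 16 x 2-bit sums */
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  /* 8 x 4-bit sums  */
        x = (x + (x >> 4)) & 0x0f0f0f0fu;                  /* 4 byte sums     */
        return (x * 0x01010101u) >> 24;                    /* add and extract */
    }

The i64 version on RV32 (next test) is simply popcount32(lo) + popcount32(hi),
with the high result word set to zero (li a1, 0).
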
+
+define i64 @test_ctpop_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_ctpop_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    srli a0, a1, 1
+; RV32I-NEXT:    lui a2, 349525
+; RV32I-NEXT:    addi s2, a2, 1365
+; RV32I-NEXT:    and a0, a0, s2
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi s3, a1, 819
+; RV32I-NEXT:    and a1, a0, s3
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s3
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 61681
+; RV32I-NEXT:    addi s4, a1, -241
+; RV32I-NEXT:    and a0, a0, s4
+; RV32I-NEXT:    lui a1, 4112
+; RV32I-NEXT:    addi s1, a1, 257
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli s5, a0, 24
+; RV32I-NEXT:    srli a0, s0, 1
+; RV32I-NEXT:    and a0, a0, s2
+; RV32I-NEXT:    sub a0, s0, a0
+; RV32I-NEXT:    and a1, a0, s3
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    and a0, a0, s3
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    and a0, a0, s4
+; RV32I-NEXT:    mv a1, s1
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    add a0, a0, s5
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_ctpop_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI10_0)(a1)
+; RV64I-NEXT:    lui a2, %hi(.LCPI10_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI10_1)(a2)
+; RV64I-NEXT:    srli a3, a0, 1
+; RV64I-NEXT:    and a1, a3, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    and a1, a0, a2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a2, %hi(.LCPI10_2)
+; RV64I-NEXT:    ld a2, %lo(.LCPI10_2)(a2)
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a1, %hi(.LCPI10_3)
+; RV64I-NEXT:    ld a1, %lo(.LCPI10_3)(a1)
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_ctpop_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a1, a1
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_ctpop_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpop a0, a0
+; RV64ZBB-NEXT:    ret
+  %1 = call i64 @llvm.ctpop.i64(i64 %a)
+  ret i64 %1
+}
+
+define i32 @test_parity_i32(i32 %a) {
+; RV32I-LABEL: test_parity_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 16
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_parity_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    srliw a0, a0, 16
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 8
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_parity_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    andi a0, a0, 1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_parity_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpopw a0, a0
+; RV64ZBB-NEXT:    andi a0, a0, 1
+; RV64ZBB-NEXT:    ret
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  %2 = and i32 %1, 1
+  ret i32 %2
+}
+
+define i64 @test_parity_i64(i64 %a) {
+; RV32I-LABEL: test_parity_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 16
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_parity_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 32
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 16
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 8
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_parity_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    xor a0, a0, a1
+; RV32ZBB-NEXT:    cpop a0, a0
+; RV32ZBB-NEXT:    andi a0, a0, 1
+; RV32ZBB-NEXT:    li a1, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_parity_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    cpop a0, a0
+; RV64ZBB-NEXT:    andi a0, a0, 1
+; RV64ZBB-NEXT:    ret
+  %1 = call i64 @llvm.ctpop.i64(i64 %a)
+  %2 = and i64 %1, 1
+  ret i64 %2
+}
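
Parity is ctpop reduced mod 2, so the Zbb paths just mask bit 0 of cpop/cpopw.
Without it, the expansion xor-folds the word onto itself, halving the width at
each step, since the xor of two halves preserves parity; the i64 case first xors
the two 32-bit halves together (a single xor on RV32). Roughly, in C
(illustrative names):

    #include <stdint.h>

    static unsigned parity32(uint32_t x) {
        x ^= x >> 16;                 /* each fold keeps the parity */
        x ^= x >> 8;
        x ^= x >> 4;
        x ^= x >> 2;
        x ^= x >> 1;
        return x & 1u;
    }

    static unsigned parity64(uint64_t x) {
        return parity32((uint32_t)(x >> 32) ^ (uint32_t)x);
    }
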