[llvm] e764e9e - [RISCV] Add i16 bswap and i8/i16 bitreverse tests to the Zbp tests. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Feb 14 19:00:57 PST 2021


Author: Craig Topper
Date: 2021-02-14T18:44:26-08:00
New Revision: e764e9e32c20914948787cc28996ecaab257628d

URL: https://github.com/llvm/llvm-project/commit/e764e9e32c20914948787cc28996ecaab257628d
DIFF: https://github.com/llvm/llvm-project/commit/e764e9e32c20914948787cc28996ecaab257628d.diff

LOG: [RISCV] Add i16 bswap and i8/i16 bitreverse tests to the Zbp tests. NFC

Maybe we should use GREVI directly for these rather than
promoting and then shifting right.
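
For context: the RV32IB/RV32IBP and RV64IB/RV64IBP checks below show the
current lowering, which promotes the narrow value to XLEN bits, reverses
the full register (rev8/rev), and then shifts the result back down
(srli 16/24/48/56). Since GREV control bit k swaps adjacent 2^k-bit
blocks, a zero-extended narrow input could in principle be handled by a
single GREVI whose control value stays within the narrow type, leaving
the already-zero upper blocks in place. A hedged sketch of what that
might look like, using the draft Zbp pseudoinstruction names (assumed
here, not taken from this commit):

    # hypothetical direct GREVI lowering for zero-extended inputs
    bswap_i16:       grevi a0, a0, 8    # rev8.h: swap bytes within each halfword
    bitreverse_i8:   grevi a0, a0, 7    # rev.b: reverse bits within each byte
    bitreverse_i16:  grevi a0, a0, 15   # rev.h: reverse bits within each halfword

The upper blocks are zero on entry and are only permuted among
themselves, so no trailing shift would be needed.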

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32Zbp.ll
    llvm/test/CodeGen/RISCV/rv64Zbp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
index ec1720337dda..96b898fe2ce2 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
@@ -2338,6 +2338,34 @@ define i64 @grev16_i64(i64 %a) nounwind {
   ret i64 %or
 }
 
+declare i16 @llvm.bswap.i16(i16)
+
+define zeroext i16 @bswap_i16(i16 zeroext %a) nounwind {
+; RV32I-LABEL: bswap_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: bswap_i16:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rev8 a0, a0
+; RV32IB-NEXT:    srli a0, a0, 16
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: bswap_i16:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    rev8 a0, a0
+; RV32IBP-NEXT:    srli a0, a0, 16
+; RV32IBP-NEXT:    ret
+  %1 = tail call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %1
+}
+
 declare i32 @llvm.bswap.i32(i32)
 
 define i32 @bswap_i32(i32 %a) nounwind {
@@ -2416,6 +2444,97 @@ define i64 @bswap_i64(i64 %a) {
   ret i64 %1
 }
 
+declare i8 @llvm.bitreverse.i8(i8)
+
+define zeroext i8 @bitreverse_i8(i8 zeroext %a) nounwind {
+; RV32I-LABEL: bitreverse_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 20
+; RV32I-NEXT:    lui a2, 61440
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    slli a0, a0, 28
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    lui a1, 208896
+; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    lui a2, 835584
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 348160
+; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    lui a2, 696320
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: bitreverse_i8:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rev a0, a0
+; RV32IB-NEXT:    srli a0, a0, 24
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: bitreverse_i8:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    rev a0, a0
+; RV32IBP-NEXT:    srli a0, a0, 24
+; RV32IBP-NEXT:    ret
+  %1 = tail call i8 @llvm.bitreverse.i8(i8 %a)
+  ret i8 %1
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+
+define zeroext i16 @bitreverse_i16(i16 zeroext %a) nounwind {
+; RV32I-LABEL: bitreverse_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    lui a2, 4080
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 61680
+; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    lui a2, 986880
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 4
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 209712
+; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    lui a2, 838848
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    lui a1, 349520
+; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    lui a2, 699040
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: bitreverse_i16:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rev a0, a0
+; RV32IB-NEXT:    srli a0, a0, 16
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: bitreverse_i16:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    rev a0, a0
+; RV32IBP-NEXT:    srli a0, a0, 16
+; RV32IBP-NEXT:    ret
+  %1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
+  ret i16 %1
+}
+
 declare i32 @llvm.bitreverse.i32(i32)
 
 define i32 @bitreverse_i32(i32 %a) nounwind {

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
index ee912dab8905..da0e11faee32 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
@@ -2670,6 +2670,34 @@ define i64 @grev32_fshr(i64 %a) nounwind {
   ret i64 %or
 }
 
+declare i16 @llvm.bswap.i16(i16)
+
+define zeroext i16 @bswap_i16(i16 zeroext %a) nounwind {
+; RV64I-LABEL: bswap_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 8
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bswap_i16:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rev8 a0, a0
+; RV64IB-NEXT:    srli a0, a0, 48
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: bswap_i16:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    rev8 a0, a0
+; RV64IBP-NEXT:    srli a0, a0, 48
+; RV64IBP-NEXT:    ret
+  %1 = tail call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %1
+}
+
 declare i32 @llvm.bswap.i32(i32)
 
 define signext i32 @bswap_i32(i32 signext %a) nounwind {
@@ -2787,6 +2815,114 @@ define i64 @bswap_i64(i64 %a) {
   ret i64 %1
 }
 
+declare i8 @llvm.bitreverse.i8(i8)
+
+define zeroext i8 @bitreverse_i8(i8 zeroext %a) nounwind {
+; RV64I-LABEL: bitreverse_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 52
+; RV64I-NEXT:    addi a2, zero, 15
+; RV64I-NEXT:    slli a2, a2, 56
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 60
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    addi a1, zero, 51
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    addi a2, zero, -13
+; RV64I-NEXT:    slli a2, a2, 58
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 85
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    addi a2, zero, -43
+; RV64I-NEXT:    slli a2, a2, 57
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bitreverse_i8:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rev a0, a0
+; RV64IB-NEXT:    srli a0, a0, 56
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: bitreverse_i8:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    rev a0, a0
+; RV64IBP-NEXT:    srli a0, a0, 56
+; RV64IBP-NEXT:    ret
+  %1 = tail call i8 @llvm.bitreverse.i8(i8 %a)
+  ret i8 %1
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+
+define zeroext i16 @bitreverse_i16(i16 zeroext %a) nounwind {
+; RV64I-LABEL: bitreverse_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 40
+; RV64I-NEXT:    addi a2, zero, 255
+; RV64I-NEXT:    slli a2, a2, 48
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, 1
+; RV64I-NEXT:    addiw a1, a1, -241
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a1, 4
+; RV64I-NEXT:    addi a2, zero, -241
+; RV64I-NEXT:    slli a2, a2, 52
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 4
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, 3
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    lui a2, 1048575
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    slli a2, a2, 50
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    lui a1, 5
+; RV64I-NEXT:    addiw a1, a1, 1365
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    lui a2, 1048573
+; RV64I-NEXT:    addiw a2, a2, 1365
+; RV64I-NEXT:    slli a2, a2, 49
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: bitreverse_i16:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rev a0, a0
+; RV64IB-NEXT:    srli a0, a0, 48
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: bitreverse_i16:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    rev a0, a0
+; RV64IBP-NEXT:    srli a0, a0, 48
+; RV64IBP-NEXT:    ret
+  %1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
+  ret i16 %1
+}
+
 declare i32 @llvm.bitreverse.i32(i32)
 
 define signext i32 @bitreverse_i32(i32 signext %a) nounwind {
