[llvm] 5547d69 - [RISCV] Add more rev32 and rev16 test cases using fshl/fshr intrinsics. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 1 20:39:14 PST 2020


Author: Craig Topper
Date: 2020-11-01T20:30:55-08:00
New Revision: 5547d69e541d5587c7982fed4753f14c4ee85f35

URL: https://github.com/llvm/llvm-project/commit/5547d69e541d5587c7982fed4753f14c4ee85f35
DIFF: https://github.com/llvm/llvm-project/commit/5547d69e541d5587c7982fed4753f14c4ee85f35.diff

LOG: [RISCV] Add more rev32 and rev16 test cases using fshl/fshr intrinsics. NFC

fshl/fshr intrinsics turn into rotl/rotr ISD opcodes and we don't
have a complete set of patterns.

We pattern match rotl, but we have a custom match for rori that gets
priority. We don't pattern match rotr and we don't have patterns
or custom code for rori from rotr.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rv32Zbp.ll
    llvm/test/CodeGen/RISCV/rv64Zbp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
index 8769ce77337c..ff2e903bb541 100644
--- a/llvm/test/CodeGen/RISCV/rv32Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbp.ll
@@ -659,6 +659,53 @@ define i32 @grev16_i32(i32 %a) nounwind {
   ret i32 %or
 }
 
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
+; RV32I-LABEL: grev16_i32_fshl:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 16
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: grev16_i32_fshl:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rori a0, a0, 16
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: grev16_i32_fshl:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    rori a0, a0, 16
+; RV32IBP-NEXT:    ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
+define signext i32 @grev16_i32_fshr(i32 signext %a) nounwind {
+; RV32I-LABEL: grev16_i32_fshr:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: grev16_i32_fshr:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a1, zero, 16
+; RV32IB-NEXT:    ror a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBP-LABEL: grev16_i32_fshr:
+; RV32IBP:       # %bb.0:
+; RV32IBP-NEXT:    addi a1, zero, 16
+; RV32IBP-NEXT:    ror a0, a0, a1
+; RV32IBP-NEXT:    ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
 define i64 @grev16_i64(i64 %a) nounwind {
 ; RV32I-LABEL: grev16_i64:
 ; RV32I:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rv64Zbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
index ae467efaab83..17c399ddcecd 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbp.ll
@@ -745,6 +745,53 @@ define signext i32 @grev16_i32(i32 signext %a) nounwind {
   ret i32 %or
 }
 
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define signext i32 @grev16_i32_fshl(i32 signext %a) nounwind {
+; RV64I-LABEL: grev16_i32_fshl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a1, a0, 16
+; RV64I-NEXT:    slli a0, a0, 16
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: grev16_i32_fshl:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    greviw a0, a0, 16
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: grev16_i32_fshl:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    greviw a0, a0, 16
+; RV64IBP-NEXT:    ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
+define signext i32 @grev16_i32_fshr(i32 signext %a) nounwind {
+; RV64I-LABEL: grev16_i32_fshr:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 16
+; RV64I-NEXT:    srliw a0, a0, 16
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: grev16_i32_fshr:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    greviw a0, a0, 16
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: grev16_i32_fshr:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    greviw a0, a0, 16
+; RV64IBP-NEXT:    ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 16)
+  ret i32 %or
+}
+
 define i64 @grev16_i64(i64 %a) nounwind {
 ; RV64I-LABEL: grev16_i64:
 ; RV64I:       # %bb.0:
@@ -806,6 +853,53 @@ define i64 @grev32(i64 %a) nounwind {
   ret i64 %or
 }
 
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define i64 @grev32_fshl(i64 %a) nounwind {
+; RV64I-LABEL: grev32_fshl:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 32
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: grev32_fshl:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rori a0, a0, 32
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: grev32_fshl:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    rori a0, a0, 32
+; RV64IBP-NEXT:    ret
+  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 32)
+  ret i64 %or
+}
+
+define i64 @grev32_fshr(i64 %a) nounwind {
+; RV64I-LABEL: grev32_fshr:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: grev32_fshr:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a1, zero, 32
+; RV64IB-NEXT:    ror a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBP-LABEL: grev32_fshr:
+; RV64IBP:       # %bb.0:
+; RV64IBP-NEXT:    addi a1, zero, 32
+; RV64IBP-NEXT:    ror a0, a0, a1
+; RV64IBP-NEXT:    ret
+  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 32)
+  ret i64 %or
+}
+
 declare i32 @llvm.bswap.i32(i32)
 
 define signext i32 @bswap_i32(i32 signext %a) nounwind {


        


More information about the llvm-commits mailing list