[llvm] 35564ff - Revert "[RISCV] Add test cases showing failure to remove mask on rotate amounts."

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu May 19 14:39:56 PDT 2022


Author: Craig Topper
Date: 2022-05-19T14:39:38-07:00
New Revision: 35564fff67bbb94f84e10afd20a653dfc4b787ef

URL: https://github.com/llvm/llvm-project/commit/35564fff67bbb94f84e10afd20a653dfc4b787ef
DIFF: https://github.com/llvm/llvm-project/commit/35564fff67bbb94f84e10afd20a653dfc4b787ef.diff

LOG: Revert "[RISCV] Add test cases showing failure to remove mask on rotate amounts."

This reverts commit e2f410feeab27a8bb2c015fc02bb8527702e401f.

The reverted commit exposes a pre-existing bug in type legalization
that causes failures when LLVM's expensive checks are enabled.
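
For context, the reverted tests exercised rotates where the amount is
explicitly masked before being passed to the funnel-shift intrinsics,
for example (quoting the i32 pattern from the hunk below):

    %maskedamt = and i32 %amt, 31
    %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)

Because the Zbb rotate instructions only consume the low bits of the
amount (5 bits for 32-bit rotates, 6 bits for 64-bit), the mask is
redundant; the RV32ZBB/RV64ZBB check lines below still show an andi
emitted before rol/rolw, which is the missed optimization the tests
were meant to capture.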

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rotl-rotr.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index a7f7c38409a5..fd5437e3cb14 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -550,333 +550,3 @@ define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind {
   %d = or i64 %b, %c
   ret i64 %d
 }
-
-define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
-; RV32I-LABEL: rotl_32_mask_multiple:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    sll a3, a0, a2
-; RV32I-NEXT:    neg a4, a2
-; RV32I-NEXT:    srl a0, a0, a4
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    sll a2, a1, a2
-; RV32I-NEXT:    srl a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: rotl_32_mask_multiple:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    sllw a3, a0, a2
-; RV64I-NEXT:    negw a4, a2
-; RV64I-NEXT:    srlw a0, a0, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    sllw a2, a1, a2
-; RV64I-NEXT:    srlw a1, a1, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rotl_32_mask_multiple:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andi a2, a2, 31
-; RV32ZBB-NEXT:    rol a0, a0, a2
-; RV32ZBB-NEXT:    rol a1, a1, a2
-; RV32ZBB-NEXT:    add a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: rotl_32_mask_multiple:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andi a2, a2, 31
-; RV64ZBB-NEXT:    rolw a0, a0, a2
-; RV64ZBB-NEXT:    rolw a1, a1, a2
-; RV64ZBB-NEXT:    addw a0, a0, a1
-; RV64ZBB-NEXT:    ret
-  %maskedamt = and i32 %amt, 31
-  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
-  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
-  %3 = add i32 %1, %2
-  ret i32 %3
-}
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-
-define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
-; RV32I-LABEL: rotl_64_mask_multiple:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a5, a4, 26
-; RV32I-NEXT:    srli a5, a5, 31
-; RV32I-NEXT:    mv a6, a1
-; RV32I-NEXT:    bnez a5, .LBB9_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a6, a0
-; RV32I-NEXT:  .LBB9_2:
-; RV32I-NEXT:    bnez a5, .LBB9_4
-; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:  .LBB9_4:
-; RV32I-NEXT:    sll a7, a6, a4
-; RV32I-NEXT:    srli t0, a0, 1
-; RV32I-NEXT:    not a1, a4
-; RV32I-NEXT:    srl t0, t0, a1
-; RV32I-NEXT:    sll t1, a0, a4
-; RV32I-NEXT:    srli a0, a6, 1
-; RV32I-NEXT:    srl t2, a0, a1
-; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    bnez a5, .LBB9_6
-; RV32I-NEXT:  # %bb.5:
-; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:  .LBB9_6:
-; RV32I-NEXT:    or a6, a7, t0
-; RV32I-NEXT:    or a7, t1, t2
-; RV32I-NEXT:    sll t0, a0, a4
-; RV32I-NEXT:    bnez a5, .LBB9_8
-; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    mv a2, a3
-; RV32I-NEXT:  .LBB9_8:
-; RV32I-NEXT:    srli a3, a2, 1
-; RV32I-NEXT:    srl a3, a3, a1
-; RV32I-NEXT:    or a3, t0, a3
-; RV32I-NEXT:    sll a2, a2, a4
-; RV32I-NEXT:    srli a0, a0, 1
-; RV32I-NEXT:    srl a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
-; RV32I-NEXT:    add a1, a7, a0
-; RV32I-NEXT:    add a0, a6, a3
-; RV32I-NEXT:    sltu a2, a0, a6
-; RV32I-NEXT:    add a1, a1, a2
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: rotl_64_mask_multiple:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    sll a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
-; RV64I-NEXT:    srl a0, a0, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    sll a2, a1, a2
-; RV64I-NEXT:    srl a1, a1, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rotl_64_mask_multiple:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    slli a5, a4, 26
-; RV32ZBB-NEXT:    srli a5, a5, 31
-; RV32ZBB-NEXT:    mv a6, a1
-; RV32ZBB-NEXT:    bnez a5, .LBB9_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    mv a6, a0
-; RV32ZBB-NEXT:  .LBB9_2:
-; RV32ZBB-NEXT:    bnez a5, .LBB9_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    mv a0, a1
-; RV32ZBB-NEXT:  .LBB9_4:
-; RV32ZBB-NEXT:    sll a7, a6, a4
-; RV32ZBB-NEXT:    srli t0, a0, 1
-; RV32ZBB-NEXT:    not a1, a4
-; RV32ZBB-NEXT:    srl t0, t0, a1
-; RV32ZBB-NEXT:    sll t1, a0, a4
-; RV32ZBB-NEXT:    srli a0, a6, 1
-; RV32ZBB-NEXT:    srl t2, a0, a1
-; RV32ZBB-NEXT:    mv a0, a3
-; RV32ZBB-NEXT:    bnez a5, .LBB9_6
-; RV32ZBB-NEXT:  # %bb.5:
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:  .LBB9_6:
-; RV32ZBB-NEXT:    or a6, a7, t0
-; RV32ZBB-NEXT:    or a7, t1, t2
-; RV32ZBB-NEXT:    sll t0, a0, a4
-; RV32ZBB-NEXT:    bnez a5, .LBB9_8
-; RV32ZBB-NEXT:  # %bb.7:
-; RV32ZBB-NEXT:    mv a2, a3
-; RV32ZBB-NEXT:  .LBB9_8:
-; RV32ZBB-NEXT:    srli a3, a2, 1
-; RV32ZBB-NEXT:    srl a3, a3, a1
-; RV32ZBB-NEXT:    or a3, t0, a3
-; RV32ZBB-NEXT:    sll a2, a2, a4
-; RV32ZBB-NEXT:    srli a0, a0, 1
-; RV32ZBB-NEXT:    srl a0, a0, a1
-; RV32ZBB-NEXT:    or a0, a2, a0
-; RV32ZBB-NEXT:    add a1, a7, a0
-; RV32ZBB-NEXT:    add a0, a6, a3
-; RV32ZBB-NEXT:    sltu a2, a0, a6
-; RV32ZBB-NEXT:    add a1, a1, a2
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: rotl_64_mask_multiple:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andi a2, a2, 63
-; RV64ZBB-NEXT:    rol a0, a0, a2
-; RV64ZBB-NEXT:    rol a1, a1, a2
-; RV64ZBB-NEXT:    add a0, a0, a1
-; RV64ZBB-NEXT:    ret
-  %maskedamt = and i64 %amt, 63
-  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %maskedamt)
-  %2 = tail call i64 @llvm.fshl.i64(i64 %b, i64 %b, i64 %maskedamt)
-  %3 = add i64 %1, %2
-  ret i64 %3
-}
-declare i64 @llvm.fshl.i64(i64, i64, i64)
-
-define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
-; RV32I-LABEL: rotr_32_mask_multiple:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srl a3, a0, a2
-; RV32I-NEXT:    neg a4, a2
-; RV32I-NEXT:    sll a0, a0, a4
-; RV32I-NEXT:    or a0, a3, a0
-; RV32I-NEXT:    srl a2, a1, a2
-; RV32I-NEXT:    sll a1, a1, a4
-; RV32I-NEXT:    or a1, a2, a1
-; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: rotr_32_mask_multiple:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srlw a3, a0, a2
-; RV64I-NEXT:    negw a4, a2
-; RV64I-NEXT:    sllw a0, a0, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    srlw a2, a1, a2
-; RV64I-NEXT:    sllw a1, a1, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rotr_32_mask_multiple:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andi a2, a2, 31
-; RV32ZBB-NEXT:    ror a0, a0, a2
-; RV32ZBB-NEXT:    ror a1, a1, a2
-; RV32ZBB-NEXT:    add a0, a0, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: rotr_32_mask_multiple:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andi a2, a2, 31
-; RV64ZBB-NEXT:    rorw a0, a0, a2
-; RV64ZBB-NEXT:    rorw a1, a1, a2
-; RV64ZBB-NEXT:    addw a0, a0, a1
-; RV64ZBB-NEXT:    ret
-  %maskedamt = and i32 %amt, 31
-  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
-  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
-  %3 = add i32 %1, %2
-  ret i32 %3
-}
-declare i32 @llvm.fshr.i32(i32, i32, i32)
-
-define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
-; RV32I-LABEL: rotr_64_mask_multiple:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    andi a5, a4, 32
-; RV32I-NEXT:    mv a6, a0
-; RV32I-NEXT:    beqz a5, .LBB11_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a6, a1
-; RV32I-NEXT:  .LBB11_2:
-; RV32I-NEXT:    beqz a5, .LBB11_4
-; RV32I-NEXT:  # %bb.3:
-; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:  .LBB11_4:
-; RV32I-NEXT:    srl a7, a6, a4
-; RV32I-NEXT:    slli t0, a1, 1
-; RV32I-NEXT:    not a0, a4
-; RV32I-NEXT:    sll t0, t0, a0
-; RV32I-NEXT:    srl t1, a1, a4
-; RV32I-NEXT:    slli a1, a6, 1
-; RV32I-NEXT:    sll t2, a1, a0
-; RV32I-NEXT:    mv a6, a2
-; RV32I-NEXT:    beqz a5, .LBB11_6
-; RV32I-NEXT:  # %bb.5:
-; RV32I-NEXT:    mv a6, a3
-; RV32I-NEXT:  .LBB11_6:
-; RV32I-NEXT:    or a1, t0, a7
-; RV32I-NEXT:    or a7, t2, t1
-; RV32I-NEXT:    srl t0, a6, a4
-; RV32I-NEXT:    beqz a5, .LBB11_8
-; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    mv a3, a2
-; RV32I-NEXT:  .LBB11_8:
-; RV32I-NEXT:    slli a2, a3, 1
-; RV32I-NEXT:    sll a2, a2, a0
-; RV32I-NEXT:    or a2, a2, t0
-; RV32I-NEXT:    srl a3, a3, a4
-; RV32I-NEXT:    slli a4, a6, 1
-; RV32I-NEXT:    sll a0, a4, a0
-; RV32I-NEXT:    or a0, a0, a3
-; RV32I-NEXT:    add a3, a7, a0
-; RV32I-NEXT:    add a0, a1, a2
-; RV32I-NEXT:    sltu a1, a0, a1
-; RV32I-NEXT:    add a1, a3, a1
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: rotr_64_mask_multiple:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srl a3, a0, a2
-; RV64I-NEXT:    neg a4, a2
-; RV64I-NEXT:    sll a0, a0, a4
-; RV64I-NEXT:    or a0, a3, a0
-; RV64I-NEXT:    srl a2, a1, a2
-; RV64I-NEXT:    sll a1, a1, a4
-; RV64I-NEXT:    or a1, a2, a1
-; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV32ZBB-LABEL: rotr_64_mask_multiple:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    andi a5, a4, 32
-; RV32ZBB-NEXT:    mv a6, a0
-; RV32ZBB-NEXT:    beqz a5, .LBB11_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    mv a6, a1
-; RV32ZBB-NEXT:  .LBB11_2:
-; RV32ZBB-NEXT:    beqz a5, .LBB11_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    mv a1, a0
-; RV32ZBB-NEXT:  .LBB11_4:
-; RV32ZBB-NEXT:    srl a7, a6, a4
-; RV32ZBB-NEXT:    slli t0, a1, 1
-; RV32ZBB-NEXT:    not a0, a4
-; RV32ZBB-NEXT:    sll t0, t0, a0
-; RV32ZBB-NEXT:    srl t1, a1, a4
-; RV32ZBB-NEXT:    slli a1, a6, 1
-; RV32ZBB-NEXT:    sll t2, a1, a0
-; RV32ZBB-NEXT:    mv a6, a2
-; RV32ZBB-NEXT:    beqz a5, .LBB11_6
-; RV32ZBB-NEXT:  # %bb.5:
-; RV32ZBB-NEXT:    mv a6, a3
-; RV32ZBB-NEXT:  .LBB11_6:
-; RV32ZBB-NEXT:    or a1, t0, a7
-; RV32ZBB-NEXT:    or a7, t2, t1
-; RV32ZBB-NEXT:    srl t0, a6, a4
-; RV32ZBB-NEXT:    beqz a5, .LBB11_8
-; RV32ZBB-NEXT:  # %bb.7:
-; RV32ZBB-NEXT:    mv a3, a2
-; RV32ZBB-NEXT:  .LBB11_8:
-; RV32ZBB-NEXT:    slli a2, a3, 1
-; RV32ZBB-NEXT:    sll a2, a2, a0
-; RV32ZBB-NEXT:    or a2, a2, t0
-; RV32ZBB-NEXT:    srl a3, a3, a4
-; RV32ZBB-NEXT:    slli a4, a6, 1
-; RV32ZBB-NEXT:    sll a0, a4, a0
-; RV32ZBB-NEXT:    or a0, a0, a3
-; RV32ZBB-NEXT:    add a3, a7, a0
-; RV32ZBB-NEXT:    add a0, a1, a2
-; RV32ZBB-NEXT:    sltu a1, a0, a1
-; RV32ZBB-NEXT:    add a1, a3, a1
-; RV32ZBB-NEXT:    ret
-;
-; RV64ZBB-LABEL: rotr_64_mask_multiple:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    andi a2, a2, 63
-; RV64ZBB-NEXT:    ror a0, a0, a2
-; RV64ZBB-NEXT:    ror a1, a1, a2
-; RV64ZBB-NEXT:    add a0, a0, a1
-; RV64ZBB-NEXT:    ret
-  %maskedamt = and i64 %amt, 63
-  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %maskedamt)
-  %2 = tail call i64 @llvm.fshr.i64(i64 %b, i64 %b, i64 %maskedamt)
-  %3 = add i64 %1, %2
-  ret i64 %3
-}
-declare i64 @llvm.fshr.i64(i64, i64, i64)