[llvm] 460781f - [LegalizeTypes] Fix bug in expensive checks verification

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu May 26 13:13:43 PDT 2022


Author: Craig Topper
Date: 2022-05-26T13:13:32-07:00
New Revision: 460781feef56bdff3e3a88fb76b427a88795641b

URL: https://github.com/llvm/llvm-project/commit/460781feef56bdff3e3a88fb76b427a88795641b
DIFF: https://github.com/llvm/llvm-project/commit/460781feef56bdff3e3a88fb76b427a88795641b.diff

LOG: [LegalizeTypes] Fix bug in expensive checks verification

This fixes an expensive checks build failure exposed by the new RISC-V tests.
Something about expanding two rotates in type legalization caused a change
in the remapping tables that the expensive checks verification wasn't expecting.
See the comment in the code for how it was fixed.

The tests come from the commit that exposed the bug:
[RISCV] Add test cases showing failure to remove mask on rotate amounts.

If the masking AND on the rotate amount has multiple users, we fail to remove it.
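
For reference, a minimal IR sketch of the pattern (the function name here is
illustrative; the committed tests below use the @rotl/@rotr_*_mask_multiple
names): the rotate amount is masked once, and that single AND feeds two
funnel-shift rotates, so it has multiple users.

  define i32 @rotr_mask_multiple_sketch(i32 %a, i32 %b, i32 %amt) {
    ; One mask, two users: both rotates take the same masked amount.
    %maskedamt = and i32 %amt, 31
    %r1 = call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
    %r2 = call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
    %sum = add i32 %r1, %r2
    ret i32 %sum
  }
  declare i32 @llvm.fshr.i32(i32, i32, i32)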

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D126036

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
    llvm/test/CodeGen/RISCV/rotl-rotr.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 7b7df0b4f6289..8fe9a83b9c3d6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -143,8 +143,16 @@ void DAGTypeLegalizer::PerformExpensiveChecks() {
         }
       } else {
         if (Mapped == 0) {
-          dbgs() << "Processed value not in any map!";
-          Failed = true;
+          SDValue NodeById = IdToValueMap.lookup(ResId);
+          // It is possible the node has been remapped to another node and had
+          // its Id updated in the Value to Id table. The node it remapped to
+          // may not have been processed yet. Look up the Id in the Id to Value
+          // table and re-check the Processed state. If the node hasn't been
+          // remapped we'll get the same state as we got earlier.
+          if (NodeById->getNodeId() == Processed) {
+            dbgs() << "Processed value not in any map!";
+            Failed = true;
+          }
         } else if (Mapped & (Mapped - 1)) {
           dbgs() << "Value in multiple maps!";
           Failed = true;

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 58ec0b0b0c170..6e8b73fc98798 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -1,13 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs -enable-legalize-types-checking < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
 ; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZBB
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZBB
 
+; NOTE: -enable-legalize-types-checking is on one command line due to a previous
+; assertion failure on an expensive checks build for @rotr_32_mask_multiple.
+
 ; These IR sequences are idioms for rotates. If rotate instructions are
 ; supported, they will be turned into ISD::ROTL or ISD::ROTR.
 
@@ -856,3 +859,323 @@ define signext i64 @rotr_64_mask_shared(i64 signext %a, i64 signext %b, i64 sign
   ret i64 %3
 }
 declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; RV32I-LABEL: rotl_32_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sll a3, a0, a2
+; RV32I-NEXT:    neg a4, a2
+; RV32I-NEXT:    srl a0, a0, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    sll a2, a1, a2
+; RV32I-NEXT:    srl a1, a1, a4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotl_32_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sllw a3, a0, a2
+; RV64I-NEXT:    negw a4, a2
+; RV64I-NEXT:    srlw a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    sllw a2, a1, a2
+; RV64I-NEXT:    srlw a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotl_32_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rol a0, a0, a2
+; RV32ZBB-NEXT:    rol a1, a1, a2
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotl_32_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rolw a0, a0, a2
+; RV64ZBB-NEXT:    rolw a1, a1, a2
+; RV64ZBB-NEXT:    addw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+
+define i64 @rotl_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
+; RV32I-LABEL: rotl_64_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a5, a4, 26
+; RV32I-NEXT:    srli a5, a5, 31
+; RV32I-NEXT:    mv a6, a1
+; RV32I-NEXT:    bnez a5, .LBB13_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a6, a0
+; RV32I-NEXT:  .LBB13_2:
+; RV32I-NEXT:    bnez a5, .LBB13_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB13_4:
+; RV32I-NEXT:    sll a7, a6, a4
+; RV32I-NEXT:    srli t0, a0, 1
+; RV32I-NEXT:    not a1, a4
+; RV32I-NEXT:    srl t0, t0, a1
+; RV32I-NEXT:    sll t1, a0, a4
+; RV32I-NEXT:    srli a0, a6, 1
+; RV32I-NEXT:    srl t2, a0, a1
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    bnez a5, .LBB13_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:  .LBB13_6:
+; RV32I-NEXT:    or a6, a7, t0
+; RV32I-NEXT:    or a7, t1, t2
+; RV32I-NEXT:    sll t0, a0, a4
+; RV32I-NEXT:    bnez a5, .LBB13_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv a2, a3
+; RV32I-NEXT:  .LBB13_8:
+; RV32I-NEXT:    srli a3, a2, 1
+; RV32I-NEXT:    srl a3, a3, a1
+; RV32I-NEXT:    or a3, t0, a3
+; RV32I-NEXT:    sll a2, a2, a4
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    add a1, a7, a0
+; RV32I-NEXT:    add a0, a6, a3
+; RV32I-NEXT:    sltu a2, a0, a6
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotl_64_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sll a3, a0, a2
+; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    srl a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    sll a2, a1, a2
+; RV64I-NEXT:    srl a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotl_64_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    slli a5, a4, 26
+; RV32ZBB-NEXT:    srli a5, a5, 31
+; RV32ZBB-NEXT:    mv a6, a1
+; RV32ZBB-NEXT:    bnez a5, .LBB13_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    mv a6, a0
+; RV32ZBB-NEXT:  .LBB13_2:
+; RV32ZBB-NEXT:    bnez a5, .LBB13_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    mv a0, a1
+; RV32ZBB-NEXT:  .LBB13_4:
+; RV32ZBB-NEXT:    sll a7, a6, a4
+; RV32ZBB-NEXT:    srli t0, a0, 1
+; RV32ZBB-NEXT:    not a1, a4
+; RV32ZBB-NEXT:    srl t0, t0, a1
+; RV32ZBB-NEXT:    sll t1, a0, a4
+; RV32ZBB-NEXT:    srli a0, a6, 1
+; RV32ZBB-NEXT:    srl t2, a0, a1
+; RV32ZBB-NEXT:    mv a0, a3
+; RV32ZBB-NEXT:    bnez a5, .LBB13_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:  .LBB13_6:
+; RV32ZBB-NEXT:    or a6, a7, t0
+; RV32ZBB-NEXT:    or a7, t1, t2
+; RV32ZBB-NEXT:    sll t0, a0, a4
+; RV32ZBB-NEXT:    bnez a5, .LBB13_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv a2, a3
+; RV32ZBB-NEXT:  .LBB13_8:
+; RV32ZBB-NEXT:    srli a3, a2, 1
+; RV32ZBB-NEXT:    srl a3, a3, a1
+; RV32ZBB-NEXT:    or a3, t0, a3
+; RV32ZBB-NEXT:    sll a2, a2, a4
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    srl a0, a0, a1
+; RV32ZBB-NEXT:    or a0, a2, a0
+; RV32ZBB-NEXT:    add a1, a7, a0
+; RV32ZBB-NEXT:    add a0, a6, a3
+; RV32ZBB-NEXT:    sltu a2, a0, a6
+; RV32ZBB-NEXT:    add a1, a1, a2
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotl_64_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rol a0, a0, a2
+; RV64ZBB-NEXT:    rol a1, a1, a2
+; RV64ZBB-NEXT:    add a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i64 %amt, 63
+  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %maskedamt)
+  %2 = tail call i64 @llvm.fshl.i64(i64 %b, i64 %b, i64 %maskedamt)
+  %3 = add i64 %1, %2
+  ret i64 %3
+}
+
+define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind {
+; RV32I-LABEL: rotr_32_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srl a3, a0, a2
+; RV32I-NEXT:    neg a4, a2
+; RV32I-NEXT:    sll a0, a0, a4
+; RV32I-NEXT:    or a0, a3, a0
+; RV32I-NEXT:    srl a2, a1, a2
+; RV32I-NEXT:    sll a1, a1, a4
+; RV32I-NEXT:    or a1, a2, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotr_32_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srlw a3, a0, a2
+; RV64I-NEXT:    negw a4, a2
+; RV64I-NEXT:    sllw a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    srlw a2, a1, a2
+; RV64I-NEXT:    sllw a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotr_32_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    ror a0, a0, a2
+; RV32ZBB-NEXT:    ror a1, a1, a2
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotr_32_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rorw a0, a0, a2
+; RV64ZBB-NEXT:    rorw a1, a1, a2
+; RV64ZBB-NEXT:    addw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i32 %amt, 31
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt)
+  %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt)
+  %3 = add i32 %1, %2
+  ret i32 %3
+}
+
+define i64 @rotr_64_mask_multiple(i64 %a, i64 %b, i64 %amt) nounwind {
+; RV32I-LABEL: rotr_64_mask_multiple:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a5, a4, 32
+; RV32I-NEXT:    mv a6, a0
+; RV32I-NEXT:    beqz a5, .LBB15_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a6, a1
+; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:    beqz a5, .LBB15_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:  .LBB15_4:
+; RV32I-NEXT:    srl a7, a6, a4
+; RV32I-NEXT:    slli t0, a1, 1
+; RV32I-NEXT:    not a0, a4
+; RV32I-NEXT:    sll t0, t0, a0
+; RV32I-NEXT:    srl t1, a1, a4
+; RV32I-NEXT:    slli a1, a6, 1
+; RV32I-NEXT:    sll t2, a1, a0
+; RV32I-NEXT:    mv a6, a2
+; RV32I-NEXT:    beqz a5, .LBB15_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    mv a6, a3
+; RV32I-NEXT:  .LBB15_6:
+; RV32I-NEXT:    or a1, t0, a7
+; RV32I-NEXT:    or a7, t2, t1
+; RV32I-NEXT:    srl t0, a6, a4
+; RV32I-NEXT:    beqz a5, .LBB15_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv a3, a2
+; RV32I-NEXT:  .LBB15_8:
+; RV32I-NEXT:    slli a2, a3, 1
+; RV32I-NEXT:    sll a2, a2, a0
+; RV32I-NEXT:    or a2, a2, t0
+; RV32I-NEXT:    srl a3, a3, a4
+; RV32I-NEXT:    slli a4, a6, 1
+; RV32I-NEXT:    sll a0, a4, a0
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    add a3, a7, a0
+; RV32I-NEXT:    add a0, a1, a2
+; RV32I-NEXT:    sltu a1, a0, a1
+; RV32I-NEXT:    add a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: rotr_64_mask_multiple:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a3, a0, a2
+; RV64I-NEXT:    neg a4, a2
+; RV64I-NEXT:    sll a0, a0, a4
+; RV64I-NEXT:    or a0, a3, a0
+; RV64I-NEXT:    srl a2, a1, a2
+; RV64I-NEXT:    sll a1, a1, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: rotr_64_mask_multiple:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a5, a4, 32
+; RV32ZBB-NEXT:    mv a6, a0
+; RV32ZBB-NEXT:    beqz a5, .LBB15_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    mv a6, a1
+; RV32ZBB-NEXT:  .LBB15_2:
+; RV32ZBB-NEXT:    beqz a5, .LBB15_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    mv a1, a0
+; RV32ZBB-NEXT:  .LBB15_4:
+; RV32ZBB-NEXT:    srl a7, a6, a4
+; RV32ZBB-NEXT:    slli t0, a1, 1
+; RV32ZBB-NEXT:    not a0, a4
+; RV32ZBB-NEXT:    sll t0, t0, a0
+; RV32ZBB-NEXT:    srl t1, a1, a4
+; RV32ZBB-NEXT:    slli a1, a6, 1
+; RV32ZBB-NEXT:    sll t2, a1, a0
+; RV32ZBB-NEXT:    mv a6, a2
+; RV32ZBB-NEXT:    beqz a5, .LBB15_6
+; RV32ZBB-NEXT:  # %bb.5:
+; RV32ZBB-NEXT:    mv a6, a3
+; RV32ZBB-NEXT:  .LBB15_6:
+; RV32ZBB-NEXT:    or a1, t0, a7
+; RV32ZBB-NEXT:    or a7, t2, t1
+; RV32ZBB-NEXT:    srl t0, a6, a4
+; RV32ZBB-NEXT:    beqz a5, .LBB15_8
+; RV32ZBB-NEXT:  # %bb.7:
+; RV32ZBB-NEXT:    mv a3, a2
+; RV32ZBB-NEXT:  .LBB15_8:
+; RV32ZBB-NEXT:    slli a2, a3, 1
+; RV32ZBB-NEXT:    sll a2, a2, a0
+; RV32ZBB-NEXT:    or a2, a2, t0
+; RV32ZBB-NEXT:    srl a3, a3, a4
+; RV32ZBB-NEXT:    slli a4, a6, 1
+; RV32ZBB-NEXT:    sll a0, a4, a0
+; RV32ZBB-NEXT:    or a0, a0, a3
+; RV32ZBB-NEXT:    add a3, a7, a0
+; RV32ZBB-NEXT:    add a0, a1, a2
+; RV32ZBB-NEXT:    sltu a1, a0, a1
+; RV32ZBB-NEXT:    add a1, a3, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: rotr_64_mask_multiple:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    ror a0, a0, a2
+; RV64ZBB-NEXT:    ror a1, a1, a2
+; RV64ZBB-NEXT:    add a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %maskedamt = and i64 %amt, 63
+  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %maskedamt)
+  %2 = tail call i64 @llvm.fshr.i64(i64 %b, i64 %b, i64 %maskedamt)
+  %3 = add i64 %1, %2
+  ret i64 %3
+}


        

