[llvm] 4d0c25f - [RISCV] Select disjoint_or+not as xnor. (#147636)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 8 21:50:27 PDT 2025


Author: Craig Topper
Date: 2025-07-08T21:50:23-07:00
New Revision: 4d0c25f4a637e49abff8cbaffe70cf179b955ea9

URL: https://github.com/llvm/llvm-project/commit/4d0c25f4a637e49abff8cbaffe70cf179b955ea9
DIFF: https://github.com/llvm/llvm-project/commit/4d0c25f4a637e49abff8cbaffe70cf179b955ea9.diff

LOG: [RISCV] Select disjoint_or+not as xnor. (#147636)

A disjoint OR can be converted to XOR. An XOR+NOT is XNOR. Idea
taken from #147279.
    
I changed the existing xnor pattern to have the not on the outside
instead of the inside. These are equivalent for xor since xor is
associative. Tablegen was already generating multiple variants
of the isel pattern using associativity.
    
There are some issues here. The disjoint flag isn't preserved
through type legalization. I was hoping we could recover it
manually for the masked merge cases, but that doesn't work either.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 097af4925860f..7f61c782fa603 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3432,7 +3432,7 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
   return false;
 }
 
-bool RISCVDAGToDAGISel::orIsAdd(const SDNode *N) const {
+bool RISCVDAGToDAGISel::orDisjoint(const SDNode *N) const {
   if (N->getFlags().hasDisjoint())
     return true;
   KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
@@ -3465,7 +3465,7 @@ bool RISCVDAGToDAGISel::selectZExtImm32(SDValue N, SDValue &Val) {
     case ISD::ADD:
       break;
     case ISD::OR:
-      if (orIsAdd(U))
+      if (orDisjoint(U))
         break;
       return false;
     default:

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 29ee3ae31606e..72e2f965f0809 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -125,7 +125,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
   bool selectNegImm(SDValue N, SDValue &Val);
   bool selectInvLogicImm(SDValue N, SDValue &Val);
 
-  bool orIsAdd(const SDNode *Node) const;
+  bool orDisjoint(const SDNode *Node) const;
   bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
                        const unsigned Depth = 0) const;
   bool hasAllBUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 8); }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8f8fb6eba9a62..f63531a0109b0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1450,15 +1450,19 @@ def : PatGprUimmLog2XLen<sra, SRAI>;
 
 // Select 'or' as ADDI if the immediate bits are known to be 0 in $rs1. This
 // can improve compressibility.
-def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
-  return orIsAdd(N);
+def riscv_or_disjoint : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
+  return orDisjoint(N);
 }]>;
-def : PatGprSimm12<or_is_add, ADDI>;
+def : PatGprSimm12<riscv_or_disjoint, ADDI>;
 
 def add_like : PatFrags<(ops node:$lhs, node:$rhs),
-                        [(or_is_add node:$lhs, node:$rhs),
+                        [(riscv_or_disjoint node:$lhs, node:$rhs),
                          (add  node:$lhs, node:$rhs)]>;
 
+def riscv_xor_like : PatFrags<(ops node:$lhs, node:$rhs),
+                              [(riscv_or_disjoint node:$lhs, node:$rhs),
+                               (xor  node:$lhs, node:$rhs)]>;
+
 // negate of low bit can be done via two (compressible) shifts.  The negate
 // is never compressible since rs1 and rd can't be the same register.
 def : Pat<(i32 (sub 0, (and_oneuse GPR:$rs, 1))),
@@ -2160,7 +2164,7 @@ def : PatGprImm<binop_allwusers<xor>, XORI, u32simm12>;
 // Select 'or' as ADDIW if the immediate bits are known to be 0 in $rs1 and
 // $rs1 is sign extended. This can improve compressibility. Using ADDIW gives
 // more power to RISCVOptWInstrs.
-def : Pat<(or_is_add 33signbits_node:$rs1, simm12:$imm),
+def : Pat<(riscv_or_disjoint 33signbits_node:$rs1, simm12:$imm),
           (ADDIW $rs1, simm12:$imm)>;
 
 /// Loads

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index b9b7ee4674ae4..31ea2de334a77 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -931,11 +931,11 @@ multiclass VPatWidenOrDisjoint_VV_VX<SDNode extop, string instruction_name> {
     defvar wti = vtiToWti.Wti;
     let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
                                  GetVTypePredicates<wti>.Predicates) in {
-      def : Pat<(wti.Vector (extop (vti.Vector (or_is_add vti.RegClass:$rs2, vti.RegClass:$rs1)))),
+      def : Pat<(wti.Vector (extop (vti.Vector (riscv_or_disjoint vti.RegClass:$rs2, vti.RegClass:$rs1)))),
                 (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    vti.RegClass:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;
-      def : Pat<(wti.Vector (extop (vti.Vector (or_is_add vti.RegClass:$rs2, (SplatPat (XLenVT GPR:$rs1)))))),
+      def : Pat<(wti.Vector (extop (vti.Vector (riscv_or_disjoint vti.RegClass:$rs2, (SplatPat (XLenVT GPR:$rs1)))))),
                 (!cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX)
                    (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
                    GPR:$rs1, vti.AVL, vti.Log2SEW, TA_MA)>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 1f598863c8d08..695223b8fd19a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -653,7 +653,7 @@ let HasOneUse = 1 in {
                                                node:$E),
                                           (riscv_or_vl node:$A, node:$B, node:$C,
                                                        node:$D, node:$E), [{
-    return orIsAdd(N);
+    return orDisjoint(N);
   }]>;
   def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 4806bcc1d63de..ecda1e6a48053 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -497,7 +497,8 @@ def invLogicImm : ComplexPattern<XLenVT, 1, "selectInvLogicImm", [], [], 0>;
 let Predicates = [HasStdExtZbbOrZbkb] in {
 def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(XLenVT (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (not (riscv_xor_like GPR:$rs1, GPR:$rs2))),
+          (XNOR GPR:$rs1, GPR:$rs2)>;
 
 def : Pat<(XLenVT (and GPR:$rs1, invLogicImm:$rs2)), (ANDN GPR:$rs1, invLogicImm:$rs2)>;
 def : Pat<(XLenVT (or  GPR:$rs1, invLogicImm:$rs2)), (ORN  GPR:$rs1, invLogicImm:$rs2)>;

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 88bb19f499ab5..3ec857dc41ead 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -111,6 +111,137 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
   ret i64 %xor
 }
 
+define i32 @disjoint_or_xnor_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %or = or disjoint i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a1, a1, a3
+; CHECK-NEXT:    or a0, a0, a2
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    not a1, a1
+; CHECK-NEXT:    ret
+  %or = or disjoint i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define i32 @disjoint_or_xnor_knownbits_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 126
+; RV32I-NEXT:    andi a1, a1, -127
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, 126
+  %b = and i32 %y, -127
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 126
+; RV32I-NEXT:    andi a1, a2, -127
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT:    andi a1, a2, -127
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    not a1, a3
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, 126
+  %b = and i64 %y, -127
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define i32 @inverted_masked_merge_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a1, a1, a2
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV32ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    not a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, %y
+  %notx = xor i32 %x, -1
+  %b = and i32 %notx, %z
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a3, a3, a5
+; RV32I-NEXT:    xor a2, a2, a4
+; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    xor a1, a1, a5
+; RV32I-NEXT:    xor a0, a0, a4
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    and a2, a0, a2
+; RV32ZBB-ZBKB-NEXT:    and a3, a1, a3
+; RV32ZBB-ZBKB-NEXT:    andn a0, a4, a0
+; RV32ZBB-ZBKB-NEXT:    andn a1, a5, a1
+; RV32ZBB-ZBKB-NEXT:    or a1, a3, a1
+; RV32ZBB-ZBKB-NEXT:    or a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    not a0, a0
+; RV32ZBB-ZBKB-NEXT:    not a1, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, %y
+  %notx = xor i64 %x, -1
+  %b = and i64 %notx, %z
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
 declare i32 @llvm.fshl.i32(i32, i32, i32)
 
 define i32 @rol_i32(i32 %a, i32 %b) nounwind {
@@ -141,15 +272,15 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    slli a5, a2, 26
 ; CHECK-NEXT:    srli a5, a5, 31
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    bnez a5, .LBB7_2
+; CHECK-NEXT:    bnez a5, .LBB13_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a0
-; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:  .LBB13_2:
 ; CHECK-NEXT:    sll a4, a3, a2
-; CHECK-NEXT:    bnez a5, .LBB7_4
+; CHECK-NEXT:    bnez a5, .LBB13_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB7_4:
+; CHECK-NEXT:  .LBB13_4:
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    not a5, a2
 ; CHECK-NEXT:    sll a2, a0, a2
@@ -192,15 +323,15 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a5, a2, 32
 ; CHECK-NEXT:    mv a3, a0
-; CHECK-NEXT:    beqz a5, .LBB9_2
+; CHECK-NEXT:    beqz a5, .LBB15_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:  .LBB9_2:
+; CHECK-NEXT:  .LBB15_2:
 ; CHECK-NEXT:    srl a4, a3, a2
-; CHECK-NEXT:    beqz a5, .LBB9_4
+; CHECK-NEXT:    beqz a5, .LBB15_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a1, a0
-; CHECK-NEXT:  .LBB9_4:
+; CHECK-NEXT:  .LBB15_4:
 ; CHECK-NEXT:    slli a0, a1, 1
 ; CHECK-NEXT:    not a5, a2
 ; CHECK-NEXT:    srl a1, a1, a2
@@ -442,19 +573,19 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 define i32 @and_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB24_2
+; RV32I-NEXT:    beqz a2, .LBB30_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:  .LBB24_2: # %identity
+; RV32I-NEXT:  .LBB30_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB24_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB30_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB24_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB30_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -470,19 +601,19 @@ identity:
 define i32 @and_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB25_2
+; RV32I-NEXT:    beqz a2, .LBB31_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:  .LBB25_2: # %identity
+; RV32I-NEXT:  .LBB31_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB25_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB31_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB25_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB31_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -498,22 +629,22 @@ identity:
 define i64 @and_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB26_2
+; RV32I-NEXT:    beqz a4, .LBB32_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a2, a0
 ; RV32I-NEXT:    and a1, a3, a1
-; RV32I-NEXT:  .LBB26_2: # %identity
+; RV32I-NEXT:  .LBB32_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB26_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB32_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB26_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -529,22 +660,22 @@ identity:
 define i64 @and_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB27_2
+; RV32I-NEXT:    beqz a4, .LBB33_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:  .LBB27_2: # %identity
+; RV32I-NEXT:  .LBB33_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB27_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB33_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB27_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -560,19 +691,19 @@ identity:
 define i32 @or_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB28_2
+; RV32I-NEXT:    beqz a2, .LBB34_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:  .LBB28_2: # %identity
+; RV32I-NEXT:  .LBB34_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB28_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB34_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB28_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -588,19 +719,19 @@ identity:
 define i32 @or_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB29_2
+; RV32I-NEXT:    beqz a2, .LBB35_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:  .LBB29_2: # %identity
+; RV32I-NEXT:  .LBB35_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB29_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB35_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB29_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -616,22 +747,22 @@ identity:
 define i64 @or_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB30_2
+; RV32I-NEXT:    beqz a4, .LBB36_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:  .LBB30_2: # %identity
+; RV32I-NEXT:  .LBB36_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB30_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB36_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB30_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB36_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -647,22 +778,22 @@ identity:
 define i64 @or_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB31_2
+; RV32I-NEXT:    beqz a4, .LBB37_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    or a0, a0, a2
 ; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:  .LBB31_2: # %identity
+; RV32I-NEXT:  .LBB37_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB31_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB37_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB31_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB37_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -678,19 +809,19 @@ identity:
 define i32 @xor_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB32_2
+; RV32I-NEXT:    beqz a2, .LBB38_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    xor a0, a1, a0
-; RV32I-NEXT:  .LBB32_2: # %identity
+; RV32I-NEXT:  .LBB38_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB32_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -706,19 +837,19 @@ identity:
 define i32 @xor_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB33_2
+; RV32I-NEXT:    beqz a2, .LBB39_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:  .LBB33_2: # %identity
+; RV32I-NEXT:  .LBB39_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB33_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -734,22 +865,22 @@ identity:
 define i64 @xor_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB34_2
+; RV32I-NEXT:    beqz a4, .LBB40_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    xor a0, a2, a0
 ; RV32I-NEXT:    xor a1, a3, a1
-; RV32I-NEXT:  .LBB34_2: # %identity
+; RV32I-NEXT:  .LBB40_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB34_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB40_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a2, a0
 ; RV32ZBB-ZBKB-NEXT:    xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -765,22 +896,22 @@ identity:
 define i64 @xor_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB35_2
+; RV32I-NEXT:    beqz a4, .LBB41_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    xor a0, a0, a2
 ; RV32I-NEXT:    xor a1, a1, a3
-; RV32I-NEXT:  .LBB35_2: # %identity
+; RV32I-NEXT:  .LBB41_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB35_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB41_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a2, a0
 ; RV32ZBB-ZBKB-NEXT:    xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 4ca637b788a45..36e5958fcba5b 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -102,6 +102,125 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
   ret i64 %xor
 }
 
+define signext i32 @disjoint_or_xnor_i32(i32 signext %a, i32 signext %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %or = or disjoint i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
+  %or = or disjoint i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define signext i32 @disjoint_or_xnor_knownbits_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 126
+; RV64I-NEXT:    andi a1, a1, -127
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV64ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, 126
+  %b = and i32 %y, -127
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 126
+; RV64I-NEXT:    andi a1, a1, -127
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV64ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, 126
+  %b = and i64 %y, -127
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define signext i32 @inverted_masked_merge_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
+; RV64I-LABEL: inverted_masked_merge_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: inverted_masked_merge_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV64ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV64ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    not a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, %y
+  %notx = xor i32 %x, -1
+  %b = and i32 %notx, %z
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: inverted_masked_merge_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: inverted_masked_merge_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV64ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV64ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    not a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, %y
+  %notx = xor i64 %x, -1
+  %b = and i64 %notx, %z
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
 declare i32 @llvm.fshl.i32(i32, i32, i32)
 
 define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
@@ -558,19 +677,19 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 define i32 @and_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB32_2
+; RV64I-NEXT:    beqz a2, .LBB38_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:  .LBB32_2: # %identity
+; RV64I-NEXT:  .LBB38_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB32_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -586,19 +705,19 @@ identity:
 define i32 @and_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB33_2
+; RV64I-NEXT:    beqz a2, .LBB39_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:  .LBB33_2: # %identity
+; RV64I-NEXT:  .LBB39_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB33_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -614,19 +733,19 @@ identity:
 define i64 @and_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB34_2
+; RV64I-NEXT:    beqz a2, .LBB40_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:  .LBB34_2: # %identity
+; RV64I-NEXT:  .LBB40_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB34_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB40_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -642,19 +761,19 @@ identity:
 define i64 @and_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB35_2
+; RV64I-NEXT:    beqz a2, .LBB41_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:  .LBB35_2: # %identity
+; RV64I-NEXT:  .LBB41_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB35_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB41_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -670,19 +789,19 @@ identity:
 define i32 @or_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB36_2
+; RV64I-NEXT:    beqz a2, .LBB42_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:  .LBB36_2: # %identity
+; RV64I-NEXT:  .LBB42_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB36_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB42_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB36_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB42_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -698,19 +817,19 @@ identity:
 define i32 @or_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB37_2
+; RV64I-NEXT:    beqz a2, .LBB43_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:  .LBB37_2: # %identity
+; RV64I-NEXT:  .LBB43_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB37_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB43_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB37_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB43_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -726,19 +845,19 @@ identity:
 define i64 @or_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB38_2
+; RV64I-NEXT:    beqz a2, .LBB44_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:  .LBB38_2: # %identity
+; RV64I-NEXT:  .LBB44_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB44_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB44_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -754,19 +873,19 @@ identity:
 define i64 @or_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB39_2
+; RV64I-NEXT:    beqz a2, .LBB45_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:  .LBB39_2: # %identity
+; RV64I-NEXT:  .LBB45_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB45_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB45_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -782,19 +901,19 @@ identity:
 define i32 @xor_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB40_2
+; RV64I-NEXT:    beqz a2, .LBB46_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a1, a0
-; RV64I-NEXT:  .LBB40_2: # %identity
+; RV64I-NEXT:  .LBB46_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB40_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB46_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB46_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -810,19 +929,19 @@ identity:
 define i32 @xor_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB41_2
+; RV64I-NEXT:    beqz a2, .LBB47_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:  .LBB41_2: # %identity
+; RV64I-NEXT:  .LBB47_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB41_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB47_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB47_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -838,19 +957,19 @@ identity:
 define i64 @xor_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB42_2
+; RV64I-NEXT:    beqz a2, .LBB48_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a1, a0
-; RV64I-NEXT:  .LBB42_2: # %identity
+; RV64I-NEXT:  .LBB48_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB42_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB48_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB42_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB48_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -866,19 +985,19 @@ identity:
 define i64 @xor_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB43_2
+; RV64I-NEXT:    beqz a2, .LBB49_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:  .LBB43_2: # %identity
+; RV64I-NEXT:  .LBB49_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB43_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB49_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB43_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB49_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity


        


More information about the llvm-commits mailing list