[llvm] [RISCV] Select disjoint_or+not as xnor. (PR #147636)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 8 19:15:51 PDT 2025


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/147636

A disjoint OR can be converted to an XOR, and an XOR+NOT is an XNOR.
    
I changed the existing xnor pattern to have the not on the outside
instead of the inside. These are equivalent for xor since xor is
associative. TableGen was already generating multiple variants
of the isel pattern using associativity.
    
There are some issues here. The disjoint flag isn't preserved
through type legalization. I was hoping we could recover it
manually for the masked merge cases, but that doesn't work either.

>From 70af45dbcecd62ac51f8ee39ebe2e1606de211f8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 8 Jul 2025 18:38:49 -0700
Subject: [PATCH 1/2] Pre-commit tests.

---
 llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll | 223 ++++++++++++++++++------
 llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll | 196 ++++++++++++++++-----
 2 files changed, 315 insertions(+), 104 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 88bb19f499ab5..852b118f0dc79 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -111,6 +111,117 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
   ret i64 %xor
 }
 
+define i32 @disjoint_or_xnor_i32(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %or = or disjoint i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a1, a1, a3
+; CHECK-NEXT:    or a0, a0, a2
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    not a1, a1
+; CHECK-NEXT:    ret
+  %or = or disjoint i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define i32 @disjoint_or_xnor_knownbits_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_knownbits_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 126
+; CHECK-NEXT:    andi a1, a1, -127
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %a = and i32 %x, 126
+  %b = and i32 %y, -127
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_knownbits_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 126
+; CHECK-NEXT:    andi a1, a2, -127
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    not a1, a3
+; CHECK-NEXT:    ret
+  %a = and i64 %x, 126
+  %b = and i64 %y, -127
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define i32 @inverted_masked_merge_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a1, a1, a2
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV32ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    not a0, a0
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, %y
+  %notx = xor i32 %x, -1
+  %b = and i32 %notx, %z
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    xor a3, a3, a5
+; RV32I-NEXT:    xor a2, a2, a4
+; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    xor a1, a1, a5
+; RV32I-NEXT:    xor a0, a0, a4
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    and a2, a0, a2
+; RV32ZBB-ZBKB-NEXT:    and a3, a1, a3
+; RV32ZBB-ZBKB-NEXT:    andn a0, a4, a0
+; RV32ZBB-ZBKB-NEXT:    andn a1, a5, a1
+; RV32ZBB-ZBKB-NEXT:    or a1, a3, a1
+; RV32ZBB-ZBKB-NEXT:    or a0, a2, a0
+; RV32ZBB-ZBKB-NEXT:    not a0, a0
+; RV32ZBB-ZBKB-NEXT:    not a1, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, %y
+  %notx = xor i64 %x, -1
+  %b = and i64 %notx, %z
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
 declare i32 @llvm.fshl.i32(i32, i32, i32)
 
 define i32 @rol_i32(i32 %a, i32 %b) nounwind {
@@ -141,15 +252,15 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    slli a5, a2, 26
 ; CHECK-NEXT:    srli a5, a5, 31
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    bnez a5, .LBB7_2
+; CHECK-NEXT:    bnez a5, .LBB13_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a0
-; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:  .LBB13_2:
 ; CHECK-NEXT:    sll a4, a3, a2
-; CHECK-NEXT:    bnez a5, .LBB7_4
+; CHECK-NEXT:    bnez a5, .LBB13_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB7_4:
+; CHECK-NEXT:  .LBB13_4:
 ; CHECK-NEXT:    srli a1, a0, 1
 ; CHECK-NEXT:    not a5, a2
 ; CHECK-NEXT:    sll a2, a0, a2
@@ -192,15 +303,15 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a5, a2, 32
 ; CHECK-NEXT:    mv a3, a0
-; CHECK-NEXT:    beqz a5, .LBB9_2
+; CHECK-NEXT:    beqz a5, .LBB15_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:  .LBB9_2:
+; CHECK-NEXT:  .LBB15_2:
 ; CHECK-NEXT:    srl a4, a3, a2
-; CHECK-NEXT:    beqz a5, .LBB9_4
+; CHECK-NEXT:    beqz a5, .LBB15_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a1, a0
-; CHECK-NEXT:  .LBB9_4:
+; CHECK-NEXT:  .LBB15_4:
 ; CHECK-NEXT:    slli a0, a1, 1
 ; CHECK-NEXT:    not a5, a2
 ; CHECK-NEXT:    srl a1, a1, a2
@@ -442,19 +553,19 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 define i32 @and_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB24_2
+; RV32I-NEXT:    beqz a2, .LBB30_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:  .LBB24_2: # %identity
+; RV32I-NEXT:  .LBB30_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB24_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB30_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB24_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB30_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -470,19 +581,19 @@ identity:
 define i32 @and_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB25_2
+; RV32I-NEXT:    beqz a2, .LBB31_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:  .LBB25_2: # %identity
+; RV32I-NEXT:  .LBB31_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB25_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB31_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB25_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB31_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -498,22 +609,22 @@ identity:
 define i64 @and_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB26_2
+; RV32I-NEXT:    beqz a4, .LBB32_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a2, a0
 ; RV32I-NEXT:    and a1, a3, a1
-; RV32I-NEXT:  .LBB26_2: # %identity
+; RV32I-NEXT:  .LBB32_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB26_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB32_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB26_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -529,22 +640,22 @@ identity:
 define i64 @and_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: and_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB27_2
+; RV32I-NEXT:    beqz a4, .LBB33_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:  .LBB27_2: # %identity
+; RV32I-NEXT:  .LBB33_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB27_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB33_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB27_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -560,19 +671,19 @@ identity:
 define i32 @or_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB28_2
+; RV32I-NEXT:    beqz a2, .LBB34_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:  .LBB28_2: # %identity
+; RV32I-NEXT:  .LBB34_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB28_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB34_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB28_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -588,19 +699,19 @@ identity:
 define i32 @or_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB29_2
+; RV32I-NEXT:    beqz a2, .LBB35_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:  .LBB29_2: # %identity
+; RV32I-NEXT:  .LBB35_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB29_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB35_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB29_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -616,22 +727,22 @@ identity:
 define i64 @or_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB30_2
+; RV32I-NEXT:    beqz a4, .LBB36_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    or a1, a3, a1
-; RV32I-NEXT:  .LBB30_2: # %identity
+; RV32I-NEXT:  .LBB36_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB30_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB36_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB30_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB36_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -647,22 +758,22 @@ identity:
 define i64 @or_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: or_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB31_2
+; RV32I-NEXT:    beqz a4, .LBB37_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    or a0, a0, a2
 ; RV32I-NEXT:    or a1, a1, a3
-; RV32I-NEXT:  .LBB31_2: # %identity
+; RV32I-NEXT:  .LBB37_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB31_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB37_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a2
 ; RV32ZBB-ZBKB-NEXT:    orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT:  .LBB31_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB37_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -678,19 +789,19 @@ identity:
 define i32 @xor_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB32_2
+; RV32I-NEXT:    beqz a2, .LBB38_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    xor a0, a1, a0
-; RV32I-NEXT:  .LBB32_2: # %identity
+; RV32I-NEXT:  .LBB38_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB32_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -706,19 +817,19 @@ identity:
 define i32 @xor_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i32_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a2, .LBB33_2
+; RV32I-NEXT:    beqz a2, .LBB39_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:  .LBB33_2: # %identity
+; RV32I-NEXT:  .LBB39_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB33_2
+; RV32ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -734,22 +845,22 @@ identity:
 define i64 @xor_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB34_2
+; RV32I-NEXT:    beqz a4, .LBB40_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    xor a0, a2, a0
 ; RV32I-NEXT:    xor a1, a3, a1
-; RV32I-NEXT:  .LBB34_2: # %identity
+; RV32I-NEXT:  .LBB40_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB34_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB40_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a2, a0
 ; RV32ZBB-ZBKB-NEXT:    xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -765,22 +876,22 @@ identity:
 define i64 @xor_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV32I-LABEL: xor_hoisted_not_i64_swapped:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    beqz a4, .LBB35_2
+; RV32I-NEXT:    beqz a4, .LBB41_2
 ; RV32I-NEXT:  # %bb.1: # %mask
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    xor a0, a0, a2
 ; RV32I-NEXT:    xor a1, a1, a3
-; RV32I-NEXT:  .LBB35_2: # %identity
+; RV32I-NEXT:  .LBB41_2: # %identity
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64_swapped:
 ; RV32ZBB-ZBKB:       # %bb.0:
-; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB35_2
+; RV32ZBB-ZBKB-NEXT:    beqz a4, .LBB41_2
 ; RV32ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV32ZBB-ZBKB-NEXT:    xnor a0, a2, a0
 ; RV32ZBB-ZBKB-NEXT:    xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
+; RV32ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
 ; RV32ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 4ca637b788a45..0eb41638bc3cb 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -102,6 +102,106 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
   ret i64 %xor
 }
 
+define signext i32 @disjoint_or_xnor_i32(i32 signext %a, i32 signext %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %or = or disjoint i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %or = or disjoint i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define signext i32 @disjoint_or_xnor_knownbits_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_knownbits_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 126
+; CHECK-NEXT:    andi a1, a1, -127
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %a = and i32 %x, 126
+  %b = and i32 %y, -127
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_knownbits_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 126
+; CHECK-NEXT:    andi a1, a1, -127
+; CHECK-NEXT:    or a0, a0, a1
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    ret
+  %a = and i64 %x, 126
+  %b = and i64 %y, -127
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
+define signext i32 @inverted_masked_merge_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
+; RV64I-LABEL: inverted_masked_merge_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: inverted_masked_merge_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV64ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV64ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    not a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i32 %x, %y
+  %notx = xor i32 %x, -1
+  %b = and i32 %notx, %z
+  %or = or i32 %a, %b
+  %not = xor i32 %or, -1
+  ret i32 %not
+}
+
+define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: inverted_masked_merge_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    xor a0, a0, a2
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: inverted_masked_merge_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    and a1, a0, a1
+; RV64ZBB-ZBKB-NEXT:    andn a0, a2, a0
+; RV64ZBB-ZBKB-NEXT:    or a0, a1, a0
+; RV64ZBB-ZBKB-NEXT:    not a0, a0
+; RV64ZBB-ZBKB-NEXT:    ret
+  %a = and i64 %x, %y
+  %notx = xor i64 %x, -1
+  %b = and i64 %notx, %z
+  %or = or i64 %a, %b
+  %not = xor i64 %or, -1
+  ret i64 %not
+}
+
 declare i32 @llvm.fshl.i32(i32, i32, i32)
 
 define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
@@ -558,19 +658,19 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
 define i32 @and_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB32_2
+; RV64I-NEXT:    beqz a2, .LBB38_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:  .LBB32_2: # %identity
+; RV64I-NEXT:  .LBB38_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB32_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB32_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -586,19 +686,19 @@ identity:
 define i32 @and_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB33_2
+; RV64I-NEXT:    beqz a2, .LBB39_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:  .LBB33_2: # %identity
+; RV64I-NEXT:  .LBB39_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB33_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB33_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -614,19 +714,19 @@ identity:
 define i64 @and_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB34_2
+; RV64I-NEXT:    beqz a2, .LBB40_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:  .LBB34_2: # %identity
+; RV64I-NEXT:  .LBB40_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB34_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB40_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB34_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -642,19 +742,19 @@ identity:
 define i64 @and_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: and_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB35_2
+; RV64I-NEXT:    beqz a2, .LBB41_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:  .LBB35_2: # %identity
+; RV64I-NEXT:  .LBB41_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: and_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB35_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB41_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    andn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB35_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -670,19 +770,19 @@ identity:
 define i32 @or_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB36_2
+; RV64I-NEXT:    beqz a2, .LBB42_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:  .LBB36_2: # %identity
+; RV64I-NEXT:  .LBB42_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB36_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB42_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB36_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB42_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -698,19 +798,19 @@ identity:
 define i32 @or_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB37_2
+; RV64I-NEXT:    beqz a2, .LBB43_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:  .LBB37_2: # %identity
+; RV64I-NEXT:  .LBB43_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB37_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB43_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB37_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB43_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -726,19 +826,19 @@ identity:
 define i64 @or_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB38_2
+; RV64I-NEXT:    beqz a2, .LBB44_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:  .LBB38_2: # %identity
+; RV64I-NEXT:  .LBB44_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB38_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB44_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB38_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB44_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -754,19 +854,19 @@ identity:
 define i64 @or_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: or_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB39_2
+; RV64I-NEXT:    beqz a2, .LBB45_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:  .LBB39_2: # %identity
+; RV64I-NEXT:  .LBB45_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: or_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB39_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB45_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    orn a0, a0, a1
-; RV64ZBB-ZBKB-NEXT:  .LBB39_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB45_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -782,19 +882,19 @@ identity:
 define i32 @xor_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB40_2
+; RV64I-NEXT:    beqz a2, .LBB46_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a1, a0
-; RV64I-NEXT:  .LBB40_2: # %identity
+; RV64I-NEXT:  .LBB46_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i32:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB40_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB46_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB40_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB46_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -810,19 +910,19 @@ identity:
 define i32 @xor_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i32_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB41_2
+; RV64I-NEXT:    beqz a2, .LBB47_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:  .LBB41_2: # %identity
+; RV64I-NEXT:  .LBB47_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i32_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB41_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB47_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB41_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB47_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i32 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -838,19 +938,19 @@ identity:
 define i64 @xor_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB42_2
+; RV64I-NEXT:    beqz a2, .LBB48_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a1, a0
-; RV64I-NEXT:  .LBB42_2: # %identity
+; RV64I-NEXT:  .LBB48_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i64:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB42_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB48_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB42_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB48_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity
@@ -866,19 +966,19 @@ identity:
 define i64 @xor_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
 ; RV64I-LABEL: xor_hoisted_not_i64_swapped:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    beqz a2, .LBB43_2
+; RV64I-NEXT:    beqz a2, .LBB49_2
 ; RV64I-NEXT:  # %bb.1: # %mask
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:  .LBB43_2: # %identity
+; RV64I-NEXT:  .LBB49_2: # %identity
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-ZBKB-LABEL: xor_hoisted_not_i64_swapped:
 ; RV64ZBB-ZBKB:       # %bb.0:
-; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB43_2
+; RV64ZBB-ZBKB-NEXT:    beqz a2, .LBB49_2
 ; RV64ZBB-ZBKB-NEXT:  # %bb.1: # %mask
 ; RV64ZBB-ZBKB-NEXT:    xnor a0, a1, a0
-; RV64ZBB-ZBKB-NEXT:  .LBB43_2: # %identity
+; RV64ZBB-ZBKB-NEXT:  .LBB49_2: # %identity
 ; RV64ZBB-ZBKB-NEXT:    ret
   %a = xor i64 %m, -1
   br i1 %cond, label %mask, label %identity

>From 7c17a53417fbb6196ec67cb9acc4474b53758cc3 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 8 Jul 2025 18:54:40 -0700
Subject: [PATCH 2/2] [RISCV] Select disjoint_or+not as xnor.

A disjoint OR can be converted to an XOR, and an XOR+NOT is an XNOR.

I changed the existing xnor pattern to have the not on the outside
instead of the inside. These are equivalent for xor since xor is
associative. TableGen was already generating multiple variants
of the isel pattern using associativity.

There are some issues here. The disjoint flag isn't preserved
through type legalization. I was hoping we could recover it
manually for the masked merge cases, but that doesn't work either.
---
 llvm/lib/Target/RISCV/RISCVInstrInfo.td   |  4 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoZb.td |  3 +-
 llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll   | 60 +++++++++++++++--------
 llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll   | 57 ++++++++++++++-------
 4 files changed, 84 insertions(+), 40 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8f8fb6eba9a62..6064ac1eda69e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1459,6 +1459,10 @@ def add_like : PatFrags<(ops node:$lhs, node:$rhs),
                         [(or_is_add node:$lhs, node:$rhs),
                          (add  node:$lhs, node:$rhs)]>;
 
+def riscv_xor_like : PatFrags<(ops node:$lhs, node:$rhs),
+                              [(or_is_add node:$lhs, node:$rhs),
+                               (xor  node:$lhs, node:$rhs)]>;
+
 // negate of low bit can be done via two (compressible) shifts.  The negate
 // is never compressible since rs1 and rd can't be the same register.
 def : Pat<(i32 (sub 0, (and_oneuse GPR:$rs, 1))),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 4806bcc1d63de..ecda1e6a48053 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -497,7 +497,8 @@ def invLogicImm : ComplexPattern<XLenVT, 1, "selectInvLogicImm", [], [], 0>;
 let Predicates = [HasStdExtZbbOrZbkb] in {
 def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(XLenVT (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (not (riscv_xor_like GPR:$rs1, GPR:$rs2))),
+          (XNOR GPR:$rs1, GPR:$rs2)>;
 
 def : Pat<(XLenVT (and GPR:$rs1, invLogicImm:$rs2)), (ANDN GPR:$rs1, invLogicImm:$rs2)>;
 def : Pat<(XLenVT (or  GPR:$rs1, invLogicImm:$rs2)), (ORN  GPR:$rs1, invLogicImm:$rs2)>;
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 852b118f0dc79..3ec857dc41ead 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -112,11 +112,16 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
 }
 
 define i32 @disjoint_or_xnor_i32(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    ret
+; RV32I-LABEL: disjoint_or_xnor_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %or = or disjoint i32 %a, %b
   %not = xor i32 %or, -1
   ret i32 %not
@@ -136,13 +141,20 @@ define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
 }
 
 define i32 @disjoint_or_xnor_knownbits_i32(i32 %x, i32 %y, i32 %z) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_knownbits_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 126
-; CHECK-NEXT:    andi a1, a1, -127
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    ret
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 126
+; RV32I-NEXT:    andi a1, a1, -127
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
   %a = and i32 %x, 126
   %b = and i32 %y, -127
   %or = or i32 %a, %b
@@ -151,14 +163,22 @@ define i32 @disjoint_or_xnor_knownbits_i32(i32 %x, i32 %y, i32 %z) nounwind {
 }
 
 define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_knownbits_i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 126
-; CHECK-NEXT:    andi a1, a2, -127
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    not a1, a3
-; CHECK-NEXT:    ret
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 126
+; RV32I-NEXT:    andi a1, a2, -127
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    not a0, a0
+; RV32I-NEXT:    not a1, a3
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT:    andi a1, a2, -127
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    not a1, a3
+; RV32ZBB-ZBKB-NEXT:    ret
   %a = and i64 %x, 126
   %b = and i64 %y, -127
   %or = or i64 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 0eb41638bc3cb..36e5958fcba5b 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -114,24 +114,36 @@ define signext i32 @disjoint_or_xnor_i32(i32 signext %a, i32 signext %b) nounwin
 }
 
 define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: disjoint_or_xnor_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %or = or disjoint i64 %a, %b
   %not = xor i64 %or, -1
   ret i64 %not
 }
 
 define signext i32 @disjoint_or_xnor_knownbits_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_knownbits_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 126
-; CHECK-NEXT:    andi a1, a1, -127
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 126
+; RV64I-NEXT:    andi a1, a1, -127
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV64ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %a = and i32 %x, 126
   %b = and i32 %y, -127
   %or = or i32 %a, %b
@@ -140,13 +152,20 @@ define signext i32 @disjoint_or_xnor_knownbits_i32(i32 signext %x, i32 signext %
 }
 
 define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
-; CHECK-LABEL: disjoint_or_xnor_knownbits_i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    andi a0, a0, 126
-; CHECK-NEXT:    andi a1, a1, -127
-; CHECK-NEXT:    or a0, a0, a1
-; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    ret
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 126
+; RV64I-NEXT:    andi a1, a1, -127
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV64ZBB-ZBKB:       # %bb.0:
+; RV64ZBB-ZBKB-NEXT:    andi a0, a0, 126
+; RV64ZBB-ZBKB-NEXT:    andi a1, a1, -127
+; RV64ZBB-ZBKB-NEXT:    xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT:    ret
   %a = and i64 %x, 126
   %b = and i64 %y, -127
   %or = or i64 %a, %b



More information about the llvm-commits mailing list