[llvm] [RISCV] Select disjoint_or+not as xnor. (PR #147636)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 8 19:16:28 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Craig Topper (topperc)
Changes:
A disjoint OR can be converted to an XOR, and an XOR followed by a NOT is an XNOR.
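For illustration, this is roughly the IR shape the new pattern matches; it is essentially the same shape as the new disjoint_or_xnor_i32 test in the diff below. The function name and the instruction noted in the comments are only for this sketch and assume Zbb or Zbkb is enabled:

```llvm
; Minimal sketch, not part of the patch: when the operands share no set bits,
; the IR carries a `disjoint` flag on the OR, so the OR behaves like an XOR
; and the trailing NOT can fold into a single XNOR.
define i32 @xnor_from_disjoint_or(i32 %a, i32 %b) {
  %or  = or disjoint i32 %a, %b   ; disjoint: %a & %b is known to be zero
  %not = xor i32 %or, -1          ; NOT is modeled as xor with -1
  ret i32 %not                    ; with Zbb/Zbkb this can now select: xnor a0, a0, a1
}
```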
I changed the existing XNOR pattern to put the NOT on the outside
instead of the inside. The two forms are equivalent because a NOT is
an XOR with all ones and XOR is associative, so not(xor(a, b)) is the
same as xor(a, not(b)). TableGen was already generating multiple
variants of the isel pattern using that associativity.
There are some remaining issues. The disjoint flag isn't preserved
through type legalization; I was hoping we could recover it manually
for the masked merge cases, but that doesn't work either.
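For reference, the masked merge shape mentioned above is the one exercised by the new inverted_masked_merge tests in the diff; a minimal sketch of why it ought to qualify:

```llvm
; Sketch only: (x & y) and (~x & z) can never have a common set bit, so the
; OR below is disjoint by construction, but nothing marks it as disjoint by
; the time isel sees it, and the trailing NOT currently stays a separate
; instruction instead of becoming an XNOR.
define i32 @inverted_masked_merge(i32 %x, i32 %y, i32 %z) {
  %a    = and i32 %x, %y
  %notx = xor i32 %x, -1
  %b    = and i32 %notx, %z
  %or   = or i32 %a, %b       ; disjoint in practice, but not flagged
  %not  = xor i32 %or, -1
  ret i32 %not
}
```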
---
Patch is 32.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/147636.diff
4 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.td (+4)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZb.td (+2-1)
- (modified) llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll (+187-56)
- (modified) llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll (+167-48)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8f8fb6eba9a62..6064ac1eda69e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1459,6 +1459,10 @@ def add_like : PatFrags<(ops node:$lhs, node:$rhs),
[(or_is_add node:$lhs, node:$rhs),
(add node:$lhs, node:$rhs)]>;
+def riscv_xor_like : PatFrags<(ops node:$lhs, node:$rhs),
+ [(or_is_add node:$lhs, node:$rhs),
+ (xor node:$lhs, node:$rhs)]>;
+
// negate of low bit can be done via two (compressible) shifts. The negate
// is never compressible since rs1 and rd can't be the same register.
def : Pat<(i32 (sub 0, (and_oneuse GPR:$rs, 1))),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 4806bcc1d63de..ecda1e6a48053 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -497,7 +497,8 @@ def invLogicImm : ComplexPattern<XLenVT, 1, "selectInvLogicImm", [], [], 0>;
let Predicates = [HasStdExtZbbOrZbkb] in {
def : Pat<(XLenVT (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (or GPR:$rs1, (not GPR:$rs2))), (ORN GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(XLenVT (not (riscv_xor_like GPR:$rs1, GPR:$rs2))),
+ (XNOR GPR:$rs1, GPR:$rs2)>;
def : Pat<(XLenVT (and GPR:$rs1, invLogicImm:$rs2)), (ANDN GPR:$rs1, invLogicImm:$rs2)>;
def : Pat<(XLenVT (or GPR:$rs1, invLogicImm:$rs2)), (ORN GPR:$rs1, invLogicImm:$rs2)>;
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 88bb19f499ab5..3ec857dc41ead 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -111,6 +111,137 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
ret i64 %xor
}
+define i32 @disjoint_or_xnor_i32(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_i32:
+; RV32ZBB-ZBKB: # %bb.0:
+; RV32ZBB-ZBKB-NEXT: xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT: ret
+ %or = or disjoint i32 %a, %b
+ %not = xor i32 %or, -1
+ ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: or a1, a1, a3
+; CHECK-NEXT: or a0, a0, a2
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: not a1, a1
+; CHECK-NEXT: ret
+ %or = or disjoint i64 %a, %b
+ %not = xor i64 %or, -1
+ ret i64 %not
+}
+
+define i32 @disjoint_or_xnor_knownbits_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a0, a0, 126
+; RV32I-NEXT: andi a1, a1, -127
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV32ZBB-ZBKB: # %bb.0:
+; RV32ZBB-ZBKB-NEXT: andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT: andi a1, a1, -127
+; RV32ZBB-ZBKB-NEXT: xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT: ret
+ %a = and i32 %x, 126
+ %b = and i32 %y, -127
+ %or = or i32 %a, %b
+ %not = xor i32 %or, -1
+ ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a0, a0, 126
+; RV32I-NEXT: andi a1, a2, -127
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: not a1, a3
+; RV32I-NEXT: ret
+;
+; RV32ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV32ZBB-ZBKB: # %bb.0:
+; RV32ZBB-ZBKB-NEXT: andi a0, a0, 126
+; RV32ZBB-ZBKB-NEXT: andi a1, a2, -127
+; RV32ZBB-ZBKB-NEXT: xnor a0, a0, a1
+; RV32ZBB-ZBKB-NEXT: not a1, a3
+; RV32ZBB-ZBKB-NEXT: ret
+ %a = and i64 %x, 126
+ %b = and i64 %y, -127
+ %or = or i64 %a, %b
+ %not = xor i64 %or, -1
+ ret i64 %not
+}
+
+define i32 @inverted_masked_merge_i32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a1, a1, a2
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i32:
+; RV32ZBB-ZBKB: # %bb.0:
+; RV32ZBB-ZBKB-NEXT: and a1, a0, a1
+; RV32ZBB-ZBKB-NEXT: andn a0, a2, a0
+; RV32ZBB-ZBKB-NEXT: or a0, a1, a0
+; RV32ZBB-ZBKB-NEXT: not a0, a0
+; RV32ZBB-ZBKB-NEXT: ret
+ %a = and i32 %x, %y
+ %notx = xor i32 %x, -1
+ %b = and i32 %notx, %z
+ %or = or i32 %a, %b
+ %not = xor i32 %or, -1
+ ret i32 %not
+}
+
+define i64 @inverted_masked_merge_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: inverted_masked_merge_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: xor a3, a3, a5
+; RV32I-NEXT: xor a2, a2, a4
+; RV32I-NEXT: and a1, a3, a1
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: xor a1, a1, a5
+; RV32I-NEXT: xor a0, a0, a4
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: not a1, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBB-ZBKB-LABEL: inverted_masked_merge_i64:
+; RV32ZBB-ZBKB: # %bb.0:
+; RV32ZBB-ZBKB-NEXT: and a2, a0, a2
+; RV32ZBB-ZBKB-NEXT: and a3, a1, a3
+; RV32ZBB-ZBKB-NEXT: andn a0, a4, a0
+; RV32ZBB-ZBKB-NEXT: andn a1, a5, a1
+; RV32ZBB-ZBKB-NEXT: or a1, a3, a1
+; RV32ZBB-ZBKB-NEXT: or a0, a2, a0
+; RV32ZBB-ZBKB-NEXT: not a0, a0
+; RV32ZBB-ZBKB-NEXT: not a1, a1
+; RV32ZBB-ZBKB-NEXT: ret
+ %a = and i64 %x, %y
+ %notx = xor i64 %x, -1
+ %b = and i64 %notx, %z
+ %or = or i64 %a, %b
+ %not = xor i64 %or, -1
+ ret i64 %not
+}
+
declare i32 @llvm.fshl.i32(i32, i32, i32)
define i32 @rol_i32(i32 %a, i32 %b) nounwind {
@@ -141,15 +272,15 @@ define i64 @rol_i64(i64 %a, i64 %b) nounwind {
; CHECK-NEXT: slli a5, a2, 26
; CHECK-NEXT: srli a5, a5, 31
; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: bnez a5, .LBB7_2
+; CHECK-NEXT: bnez a5, .LBB13_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a0
-; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: .LBB13_2:
; CHECK-NEXT: sll a4, a3, a2
-; CHECK-NEXT: bnez a5, .LBB7_4
+; CHECK-NEXT: bnez a5, .LBB13_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: .LBB13_4:
; CHECK-NEXT: srli a1, a0, 1
; CHECK-NEXT: not a5, a2
; CHECK-NEXT: sll a2, a0, a2
@@ -192,15 +323,15 @@ define i64 @ror_i64(i64 %a, i64 %b) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a5, a2, 32
; CHECK-NEXT: mv a3, a0
-; CHECK-NEXT: beqz a5, .LBB9_2
+; CHECK-NEXT: beqz a5, .LBB15_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
-; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: .LBB15_2:
; CHECK-NEXT: srl a4, a3, a2
-; CHECK-NEXT: beqz a5, .LBB9_4
+; CHECK-NEXT: beqz a5, .LBB15_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: .LBB9_4:
+; CHECK-NEXT: .LBB15_4:
; CHECK-NEXT: slli a0, a1, 1
; CHECK-NEXT: not a5, a2
; CHECK-NEXT: srl a1, a1, a2
@@ -442,19 +573,19 @@ define i1 @andn_snez_i64(i64 %a, i64 %b) nounwind {
define i32 @and_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: and_hoisted_not_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB24_2
+; RV32I-NEXT: beqz a2, .LBB30_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: and a0, a1, a0
-; RV32I-NEXT: .LBB24_2: # %identity
+; RV32I-NEXT: .LBB30_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB24_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB30_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT: .LBB24_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB30_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -470,19 +601,19 @@ identity:
define i32 @and_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: and_hoisted_not_i32_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB25_2
+; RV32I-NEXT: beqz a2, .LBB31_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: .LBB25_2: # %identity
+; RV32I-NEXT: .LBB31_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i32_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB25_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB31_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: andn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT: .LBB25_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB31_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -498,22 +629,22 @@ identity:
define i64 @and_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: and_hoisted_not_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB26_2
+; RV32I-NEXT: beqz a4, .LBB32_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: and a0, a2, a0
; RV32I-NEXT: and a1, a3, a1
-; RV32I-NEXT: .LBB26_2: # %identity
+; RV32I-NEXT: .LBB32_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB26_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB32_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: andn a0, a0, a2
; RV32ZBB-ZBKB-NEXT: andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT: .LBB26_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB32_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
@@ -529,22 +660,22 @@ identity:
define i64 @and_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: and_hoisted_not_i64_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB27_2
+; RV32I-NEXT: beqz a4, .LBB33_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: and a1, a1, a3
-; RV32I-NEXT: .LBB27_2: # %identity
+; RV32I-NEXT: .LBB33_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: and_hoisted_not_i64_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB27_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB33_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: andn a0, a0, a2
; RV32ZBB-ZBKB-NEXT: andn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT: .LBB27_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB33_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
@@ -560,19 +691,19 @@ identity:
define i32 @or_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: or_hoisted_not_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB28_2
+; RV32I-NEXT: beqz a2, .LBB34_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: or a0, a1, a0
-; RV32I-NEXT: .LBB28_2: # %identity
+; RV32I-NEXT: .LBB34_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB28_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB34_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT: .LBB28_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB34_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -588,19 +719,19 @@ identity:
define i32 @or_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: or_hoisted_not_i32_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB29_2
+; RV32I-NEXT: beqz a2, .LBB35_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: .LBB29_2: # %identity
+; RV32I-NEXT: .LBB35_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i32_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB29_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB35_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: orn a0, a0, a1
-; RV32ZBB-ZBKB-NEXT: .LBB29_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB35_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -616,22 +747,22 @@ identity:
define i64 @or_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: or_hoisted_not_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB30_2
+; RV32I-NEXT: beqz a4, .LBB36_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: or a0, a2, a0
; RV32I-NEXT: or a1, a3, a1
-; RV32I-NEXT: .LBB30_2: # %identity
+; RV32I-NEXT: .LBB36_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB30_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB36_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: orn a0, a0, a2
; RV32ZBB-ZBKB-NEXT: orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT: .LBB30_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB36_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
@@ -647,22 +778,22 @@ identity:
define i64 @or_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: or_hoisted_not_i64_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB31_2
+; RV32I-NEXT: beqz a4, .LBB37_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: or a1, a1, a3
-; RV32I-NEXT: .LBB31_2: # %identity
+; RV32I-NEXT: .LBB37_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: or_hoisted_not_i64_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB31_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB37_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: orn a0, a0, a2
; RV32ZBB-ZBKB-NEXT: orn a1, a1, a3
-; RV32ZBB-ZBKB-NEXT: .LBB31_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB37_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
@@ -678,19 +809,19 @@ identity:
define i32 @xor_hoisted_not_i32(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: xor_hoisted_not_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB32_2
+; RV32I-NEXT: beqz a2, .LBB38_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: xor a0, a1, a0
-; RV32I-NEXT: .LBB32_2: # %identity
+; RV32I-NEXT: .LBB38_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB32_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB38_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT: .LBB32_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB38_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -706,19 +837,19 @@ identity:
define i32 @xor_hoisted_not_i32_swapped(i32 %x, i32 %m, i1 zeroext %cond) {
; RV32I-LABEL: xor_hoisted_not_i32_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a2, .LBB33_2
+; RV32I-NEXT: beqz a2, .LBB39_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: .LBB33_2: # %identity
+; RV32I-NEXT: .LBB39_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i32_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB33_2
+; RV32ZBB-ZBKB-NEXT: beqz a2, .LBB39_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: xnor a0, a1, a0
-; RV32ZBB-ZBKB-NEXT: .LBB33_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB39_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i32 %m, -1
br i1 %cond, label %mask, label %identity
@@ -734,22 +865,22 @@ identity:
define i64 @xor_hoisted_not_i64(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: xor_hoisted_not_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB34_2
+; RV32I-NEXT: beqz a4, .LBB40_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: xor a0, a2, a0
; RV32I-NEXT: xor a1, a3, a1
-; RV32I-NEXT: .LBB34_2: # %identity
+; RV32I-NEXT: .LBB40_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB34_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB40_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: xnor a0, a2, a0
; RV32ZBB-ZBKB-NEXT: xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT: .LBB34_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB40_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
@@ -765,22 +896,22 @@ identity:
define i64 @xor_hoisted_not_i64_swapped(i64 %x, i64 %m, i1 zeroext %cond) {
; RV32I-LABEL: xor_hoisted_not_i64_swapped:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a4, .LBB35_2
+; RV32I-NEXT: beqz a4, .LBB41_2
; RV32I-NEXT: # %bb.1: # %mask
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: xor a1, a1, a3
-; RV32I-NEXT: .LBB35_2: # %identity
+; RV32I-NEXT: .LBB41_2: # %identity
; RV32I-NEXT: ret
;
; RV32ZBB-ZBKB-LABEL: xor_hoisted_not_i64_swapped:
; RV32ZBB-ZBKB: # %bb.0:
-; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB35_2
+; RV32ZBB-ZBKB-NEXT: beqz a4, .LBB41_2
; RV32ZBB-ZBKB-NEXT: # %bb.1: # %mask
; RV32ZBB-ZBKB-NEXT: xnor a0, a2, a0
; RV32ZBB-ZBKB-NEXT: xnor a1, a3, a1
-; RV32ZBB-ZBKB-NEXT: .LBB35_2: # %identity
+; RV32ZBB-ZBKB-NEXT: .LBB41_2: # %identity
; RV32ZBB-ZBKB-NEXT: ret
%a = xor i64 %m, -1
br i1 %cond, label %mask, label %identity
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 4ca637b788a45..36e5958fcba5b 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -102,6 +102,125 @@ define i64 @xnor_i64(i64 %a, i64 %b) nounwind {
ret i64 %xor
}
+define signext i32 @disjoint_or_xnor_i32(i32 signext %a, i32 signext %b) nounwind {
+; CHECK-LABEL: disjoint_or_xnor_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: or a0, a0, a1
+; CHECK-NEXT: not a0, a0
+; CHECK-NEXT: ret
+ %or = or disjoint i32 %a, %b
+ %not = xor i32 %or, -1
+ ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_i64(i64 %a, i64 %b) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: not a0, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_i64:
+; RV64ZBB-ZBKB: # %bb.0:
+; RV64ZBB-ZBKB-NEXT: xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT: ret
+ %or = or disjoint i64 %a, %b
+ %not = xor i64 %or, -1
+ ret i64 %not
+}
+
+define signext i32 @disjoint_or_xnor_knownbits_i32(i32 signext %x, i32 signext %y, i32 signext %z) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 126
+; RV64I-NEXT: andi a1, a1, -127
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: not a0, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-ZBKB-LABEL: disjoint_or_xnor_knownbits_i32:
+; RV64ZBB-ZBKB: # %bb.0:
+; RV64ZBB-ZBKB-NEXT: andi a0, a0, 126
+; RV64ZBB-ZBKB-NEXT: andi a1, a1, -127
+; RV64ZBB-ZBKB-NEXT: xnor a0, a0, a1
+; RV64ZBB-ZBKB-NEXT: ret
+ %a = and i32 %x, 126
+ %b = and i32 %y, -127
+ %or = or i32 %a, %b
+ %not = xor i32 %or, -1
+ ret i32 %not
+}
+
+define i64 @disjoint_or_xnor_knownbits_i64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV64I-LABEL: disjoint_or_xnor_knownbits_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a0, a0, 126
+; RV64I-NEXT: andi a1, a1, -127
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: not a0, a0
+; RV64I-NEXT: ret
+;
+...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/147636
More information about the llvm-commits mailing list