[llvm] [NFC][AMDGPU] Add test for unfold-masked-merge-scalar-variablemask.ll (PR #140093)
via llvm-commits
llvm-commits at lists.llvm.org
Thu May 15 09:10:18 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Harrison Hao (harrisonGPU)
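For background, the new file exercises the two standard masked-merge forms: the unfolded "out" form `(x & m) | (y & ~m)` and the folded "in" form `((x ^ y) & m) ^ y`, which select bits of `x` where the mask is set and bits of `y` where it is clear. A minimal sketch of the equivalence in LLVM IR (function names here are illustrative, not taken from the patch):

```llvm
; Select bits of %x where %m is 1 and bits of %y where %m is 0.

; "out" form: (x & m) | (y & ~m) -- four ops, uses the mask twice
define i32 @merge_out(i32 %x, i32 %y, i32 %m) {
  %mx = and i32 %x, %m
  %notm = xor i32 %m, -1        ; ~m
  %my = and i32 %y, %notm
  %r = or i32 %mx, %my
  ret i32 %r
}

; "in" form: ((x ^ y) & m) ^ y -- three ops, uses the mask once
define i32 @merge_in(i32 %x, i32 %y, i32 %m) {
  %n0 = xor i32 %x, %y           ; bits where x and y differ
  %n1 = and i32 %n0, %m          ; keep only the differing bits selected by m
  %r = xor i32 %n1, %y           ; flip those bits back in on top of %y
  ret i32 %r
}
```

For a mask bit of 1 the "in" form yields `(x ^ y) ^ y = x`, and for a mask bit of 0 it yields `0 ^ y = y`, so the two forms agree. On gfx1100 the "out" form maps onto `s_and_b32`/`s_and_not1_b32`/`s_or_b32`, as the checks below show.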
Patch is 29.34 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/140093.diff

1 file affected:

- (added) llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll (+779)
``````````diff
diff --git a/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
new file mode 100644
index 0000000000000..d580db53c253c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll
@@ -0,0 +1,779 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck --check-prefix=GCN %s
+
+define i32 @s_out32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_and_not1_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i32 %x, %mask
+ %notmask = xor i32 %mask, -1
+ %my = and i32 %y, %notmask
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i64 @s_out64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_out64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[16:17]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mx = and i64 %x, %mask
+ %notmask = xor i64 %mask, -1
+ %my = and i64 %y, %notmask
+ %r = or i64 %mx, %my
+ ret i64 %r
+}
+
+define i32 @s_in32(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i64 @s_in64(i64 inreg %x, i64 inreg %y, i64 inreg %mask) {
+; GCN-LABEL: s_in64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[16:17]
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i64 %x, %y
+ %n1 = and i64 %n0, %mask
+ %r = xor i64 %n1, %y
+ ret i64 %r
+}
+; ============================================================================ ;
+; Commutativity tests.
+; ============================================================================ ;
+define i32 @s_in_commutativity_0_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_0_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_0_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @in_commutativity_0_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: in_commutativity_0_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_0_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_0_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %x
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_0(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_commutativity_1_1_1(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_commutativity_1_1_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s1, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s1, s2, s1
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %x, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Y is an 'and' too.
+; ============================================================================ ;
+define i32 @s_in_complex_y0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %mask) {
+; GCN-LABEL: s_in_complex_y1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_and_b32 s0, s0, s3
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; M is an 'xor' too.
+; ============================================================================ ;
+define i32 @s_in_complex_m0(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_m1(i32 inreg %x, i32 inreg %y, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s2, s2, s3
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+; ============================================================================ ;
+; Both Y and M are complex.
+; ============================================================================ ;
+define i32 @s_in_complex_y0_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m0(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m0:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y0_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y0_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+define i32 @s_in_complex_y1_m1(i32 inreg %x, i32 inreg %y_hi, i32 inreg %y_low, i32 inreg %m_a, i32 inreg %m_b) {
+; GCN-LABEL: s_in_complex_y1_m1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s1, s1, s2
+; GCN-NEXT: s_xor_b32 s2, s3, s16
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_xor_b32 s0, s1, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %y = and i32 %y_hi, %y_low
+ %mask = xor i32 %m_a, %m_b
+ %n0 = xor i32 %x, %y
+ %n1 = and i32 %mask, %n0
+ %r = xor i32 %y, %n1
+ ret i32 %r
+}
+; ============================================================================ ;
+; Various cases with %x and/or %y being a constant
+; ============================================================================ ;
+define i32 @s_out_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_not1_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_varx_mone(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s2
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s0, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, -1
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_varx_mone_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_mone_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_not_b32 s1, s2
+; GCN-NEXT: s_not_b32 s0, s0
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_nand_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, -1
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, -1
+ ret i32 %r
+}
+
+define i32 @s_out_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s2, s0
+; GCN-NEXT: s_and_not1_b32 s1, 42, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, %x
+ %my = and i32 %notmask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @in_constant_varx_42(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: in_constant_varx_42:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_and_b32 s1, s2, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %notmask, %x
+ %my = and i32 %mask, 42
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_in_constant_varx_42_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_varx_42_invmask:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GCN-NEXT: s_and_not1_b32 s0, s0, s2
+; GCN-NEXT: s_xor_b32 s0, s0, 42
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %n0 = xor i32 %x, 42
+ %n1 = and i32 %n0, %notmask
+ %r = xor i32 %n1, 42
+ ret i32 %r
+}
+
+define i32 @s_out_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s1, s2
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %notmask = xor i32 %mask, -1
+ %mx = and i32 %mask, -1
+ %my = and i32 %notmask, %y
+ %r = or i32 %mx, %my
+ ret i32 %r
+}
+
+define i32 @s_in_constant_mone_vary(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_in_constant_mone_vary:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_or_b32 s0, s2, s1
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+ %n0 = xor i32 -1, %y
+ %n1 = and i32 %n0, %mask
+ %r = xor i32 %n1, %y
+ ret i32 %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define i32 @s_out_constant_mone_vary_invmask(i32 inreg %x, i32 inreg %y, i32 inreg %mask) {
+; GCN-LABEL: s_out_constant_mone_vary_invmask:
+; GCN: ...
[truncated]
``````````
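As the NOTE line at the top of the file indicates, the CHECK lines were autogenerated by `utils/update_llc_test_checks.py`. Assuming a standard llvm-project checkout with a build directory, they can be regenerated with something like `llvm/utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc llvm/test/CodeGen/AMDGPU/unfold-masked-merge-scalar-variablemask.ll` (paths here are assumptions, not from the patch).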
https://github.com/llvm/llvm-project/pull/140093