[llvm] c7659d8 - [NFC][GlobalISel] Pre-commit GISel AMDGPU tests for XOR, OR, AND (#138586)
via llvm-commits
llvm-commits at lists.llvm.org
Tue May 6 12:59:19 PDT 2025
Author: Chinmay Deshpande
Date: 2025-05-06T12:59:15-07:00
New Revision: c7659d88ac97ccfdc4eadb23673e3a4fd9cdeb53
URL: https://github.com/llvm/llvm-project/commit/c7659d88ac97ccfdc4eadb23673e3a4fd9cdeb53
DIFF: https://github.com/llvm/llvm-project/commit/c7659d88ac97ccfdc4eadb23673e3a4fd9cdeb53.diff
LOG: [NFC][GlobalISel] Pre-commit GISel AMDGPU tests for XOR, OR, AND (#138586)
Added:
llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
new file mode 100644
index 0000000000000..ed3720a950b38
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
@@ -0,0 +1,839 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+
+define amdgpu_ps i16 @s_and_i16(i16 inreg %num, i16 inreg %den) {
+; GCN-LABEL: s_and_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define i16 @v_and_i16(i16 %num, i16 %den) {
+; GCN-LABEL: v_and_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps zeroext i16 @s_and_i16_zeroext(i16 inreg zeroext %num, i16 inreg zeroext %den) {
+; GFX7-LABEL: s_and_i16_zeroext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_and_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_and_i16_zeroext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_and_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_and_i16_zeroext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_and_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define zeroext i16 @v_and_i16_zeroext(i16 zeroext %num, i16 zeroext %den) {
+; GCN-LABEL: v_and_i16_zeroext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps signext i16 @s_and_i16_signext(i16 inreg signext %num, i16 inreg signext %den) {
+; GFX7-LABEL: s_and_i16_signext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_and_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_and_i16_signext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_and_b32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_and_i16_signext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_and_b32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_sext_i32_i16 s0, s0
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_sext_i32_i16 s0, s0
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define signext i16 @v_and_i16_signext(i16 signext %num, i16 signext %den) {
+; GCN-LABEL: v_and_i16_signext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps i32 @s_and_i32(i32 inreg %num, i32 inreg %den) {
+; GCN-LABEL: s_and_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i32 %num, %den
+ ret i32 %result
+}
+
+define i32 @v_and_i32(i32 %num, i32 %den) {
+; GCN-LABEL: v_and_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i32 %num, %den
+ ret i32 %result
+}
+
+define amdgpu_ps <2 x i32> @s_and_v2i32(<2 x i32> inreg %num, <2 x i32> inreg %den) {
+; GCN-LABEL: s_and_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define <2 x i32> @v_and_v2i32(<2 x i32> %num, <2 x i32> %den) {
+; GCN-LABEL: v_and_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v2
+; GCN-NEXT: v_and_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define amdgpu_cs i33 @s_and_i33(i33 inreg %num, i33 inreg %den) {
+; GCN-LABEL: s_and_i33:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i33:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i33:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i33 %num, %den
+ ret i33 %result
+}
+
+define amdgpu_ps i64 @s_and_i64(i64 inreg %num, i64 inreg %den) {
+; GCN-LABEL: s_and_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i64 %num, %den
+ ret i64 %result
+}
+
+define i64 @v_and_i64(i64 %num, i64 %den) {
+; GCN-LABEL: v_and_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v2
+; GCN-NEXT: v_and_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i64 %num, %den
+ ret i64 %result
+}
+
+define amdgpu_ps <3 x i32> @s_and_i96(i96 inreg %num, i96 inreg %den) {
+; GCN-LABEL: s_and_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b32 s6, s3
+; GCN-NEXT: s_mov_b32 s7, s4
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT: s_and_b32 s2, s2, s5
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_mov_b32 s6, s3
+; GFX10PLUS-NEXT: s_mov_b32 s7, s4
+; GFX10PLUS-NEXT: s_and_b32 s2, s2, s5
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mov_b32 s6, s3
+; GFX12-NEXT: s_mov_b32 s7, s4
+; GFX12-NEXT: s_and_b32 s2, s2, s5
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i96 %num, %den
+ %cast = bitcast i96 %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define i96 @v_and_i96(i96 %num, i96 %den) {
+; GCN-LABEL: v_and_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v3
+; GCN-NEXT: v_and_b32_e32 v1, v1, v4
+; GCN-NEXT: v_and_b32_e32 v2, v2, v5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v3
+; GFX10PLUS-NEXT: v_and_b32_e32 v1, v1, v4
+; GFX10PLUS-NEXT: v_and_b32_e32 v2, v2, v5
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v3
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v4
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i96 %num, %den
+ ret i96 %result
+}
+
+define amdgpu_ps <4 x i32> @s_and_i128(i128 inreg %num, i128 inreg %den) {
+; GCN-LABEL: s_and_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT: s_and_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX10PLUS-NEXT: s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: s_and_b64 s[2:3], s[2:3], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i128 %num, %den
+ %cast = bitcast i128 %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+define i128 @v_and_i128(i128 %num, i128 %den) {
+; GCN-LABEL: v_and_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v4
+; GCN-NEXT: v_and_b32_e32 v1, v1, v5
+; GCN-NEXT: v_and_b32_e32 v2, v2, v6
+; GCN-NEXT: v_and_b32_e32 v3, v3, v7
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i128 %num, %den
+ ret i128 %result
+}
+
+define amdgpu_ps <8 x i32> @s_and_i256(i256 inreg %num, i256 inreg %den) {
+; GCN-LABEL: s_and_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_and_b64 s[0:1], s[0:1], s[8:9]
+; GCN-NEXT: s_and_b64 s[2:3], s[2:3], s[10:11]
+; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[12:13]
+; GCN-NEXT: s_and_b64 s[6:7], s[6:7], s[14:15]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_and_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_and_b64 s[0:1], s[0:1], s[8:9]
+; GFX10PLUS-NEXT: s_and_b64 s[2:3], s[2:3], s[10:11]
+; GFX10PLUS-NEXT: s_and_b64 s[4:5], s[4:5], s[12:13]
+; GFX10PLUS-NEXT: s_and_b64 s[6:7], s[6:7], s[14:15]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_and_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b64 s[0:1], s[0:1], s[8:9]
+; GFX12-NEXT: s_and_b64 s[2:3], s[2:3], s[10:11]
+; GFX12-NEXT: s_and_b64 s[4:5], s[4:5], s[12:13]
+; GFX12-NEXT: s_and_b64 s[6:7], s[6:7], s[14:15]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = and i256 %num, %den
+ %cast = bitcast i256 %result to <8 x i32>
+ ret <8 x i32> %cast
+}
+
+define i256 @v_and_i256(i256 %num, i256 %den) {
+; GCN-LABEL: v_and_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, v0, v8
+; GCN-NEXT: v_and_b32_e32 v1, v1, v9
+; GCN-NEXT: v_and_b32_e32 v2, v2, v10
+; GCN-NEXT: v_and_b32_e32 v3, v3, v11
+; GCN-NEXT: v_and_b32_e32 v4, v4, v12
+; GCN-NEXT: v_and_b32_e32 v5, v5, v13
+; GCN-NEXT: v_and_b32_e32 v6, v6, v14
+; GCN-NEXT: v_and_b32_e32 v7, v7, v15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_and_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_and_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_and_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_and_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = and i256 %num, %den
+ ret i256 %result
+}
+
+define amdgpu_ps void @s_and_u64_zext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_and_u64_zext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v3, 0
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_and_u64_zext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, 0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_and_u64_zext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_and_u64_zext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: v_mov_b32_e32 v3, 0
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_and_u64_zext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_and_u64_zext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %and = and i64 %ext, 80
+ store i64 %and, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_and_u64_zext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_and_u64_zext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b32 s5, 0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_and_u64_zext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: s_mov_b32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_and_u64_zext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_mov_b32 s3, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_and_u64_zext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_mov_b32 s3, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_and_u64_zext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_and_u64_zext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %and = and i64 %ext, 80
+ store i64 %and, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @s_and_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_and_u64_sext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v3, 0
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_and_u64_sext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, 0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_and_u64_sext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_and_u64_sext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: v_mov_b32_e32 v3, 0
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_and_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_and_u64_sext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_and_u64_sext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %and = and i64 %ext, 80
+ store i64 %and, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_and_u64_sext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_and_u64_sext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_ashr_i32 s5, s4, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_and_u64_sext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_ashr_i32 s3, s2, 31
+; GFX8-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_and_u64_sext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_ashr_i32 s3, s2, 31
+; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_and_u64_sext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_ashr_i32 s3, s2, 31
+; GFX10-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_and_u64_sext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_ashr_i32 s3, s2, 31
+; GFX11-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_and_u64_sext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %and = and i64 %ext, 80
+ store i64 %and, ptr addrspace(1) %out, align 8
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11-FAKE16: {{.*}}
+; GFX11-TRUE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
new file mode 100644
index 0000000000000..df034d82118b1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
@@ -0,0 +1,843 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16, -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+
+define amdgpu_ps i16 @s_or_i16(i16 inreg %num, i16 inreg %den) {
+; GCN-LABEL: s_or_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define i16 @v_or_i16(i16 %num, i16 %den) {
+; GCN-LABEL: v_or_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps zeroext i16 @s_or_i16_zeroext(i16 inreg zeroext %num, i16 inreg zeroext %den) {
+; GFX7-LABEL: s_or_i16_zeroext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_or_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_or_i16_zeroext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_or_i16_zeroext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define zeroext i16 @v_or_i16_zeroext(i16 zeroext %num, i16 zeroext %den) {
+; GCN-LABEL: v_or_i16_zeroext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps signext i16 @s_or_i16_signext(i16 inreg signext %num, i16 inreg signext %den) {
+; GFX7-LABEL: s_or_i16_signext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_or_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_or_i16_signext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_or_i16_signext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_sext_i32_i16 s0, s0
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_sext_i32_i16 s0, s0
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define signext i16 @v_or_i16_signext(i16 signext %num, i16 signext %den) {
+; GCN-LABEL: v_or_i16_signext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps i32 @s_or_i32(i32 inreg %num, i32 inreg %den) {
+; GCN-LABEL: s_or_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i32 %num, %den
+ ret i32 %result
+}
+
+define i32 @v_or_i32(i32 %num, i32 %den) {
+; GCN-LABEL: v_or_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i32 %num, %den
+ ret i32 %result
+}
+
+define amdgpu_ps <2 x i32> @s_or_v2i32(<2 x i32> inreg %num, <2 x i32> inreg %den) {
+; GCN-LABEL: s_or_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define <2 x i32> @v_or_v2i32(<2 x i32> %num, <2 x i32> %den) {
+; GCN-LABEL: v_or_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v2
+; GCN-NEXT: v_or_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define amdgpu_cs i33 @s_or_i33(i33 inreg %num, i33 inreg %den) {
+; GCN-LABEL: s_or_i33:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i33:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i33:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i33 %num, %den
+ ret i33 %result
+}
+
+define amdgpu_ps i64 @s_or_i64(i64 inreg %num, i64 inreg %den) {
+; GCN-LABEL: s_or_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i64 %num, %den
+ ret i64 %result
+}
+
+define i64 @v_or_i64(i64 %num, i64 %den) {
+; GCN-LABEL: v_or_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v2
+; GCN-NEXT: v_or_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i64 %num, %den
+ ret i64 %result
+}
+
+define amdgpu_ps <3 x i32> @s_or_i96(i96 inreg %num, i96 inreg %den) {
+; GCN-LABEL: s_or_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b32 s6, s3
+; GCN-NEXT: s_mov_b32 s7, s4
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT: s_or_b32 s2, s2, s5
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_mov_b32 s6, s3
+; GFX10PLUS-NEXT: s_mov_b32 s7, s4
+; GFX10PLUS-NEXT: s_or_b32 s2, s2, s5
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mov_b32 s6, s3
+; GFX12-NEXT: s_mov_b32 s7, s4
+; GFX12-NEXT: s_or_b32 s2, s2, s5
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i96 %num, %den
+ %cast = bitcast i96 %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define i96 @v_or_i96(i96 %num, i96 %den) {
+; GCN-LABEL: v_or_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v3
+; GCN-NEXT: v_or_b32_e32 v1, v1, v4
+; GCN-NEXT: v_or_b32_e32 v2, v2, v5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX10PLUS-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v5
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX12-NEXT: v_or_b32_e32 v2, v2, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i96 %num, %den
+ ret i96 %result
+}
+
+define amdgpu_ps <4 x i32> @s_or_i128(i128 inreg %num, i128 inreg %den) {
+; GCN-LABEL: s_or_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX10PLUS-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i128 %num, %den
+ %cast = bitcast i128 %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+define i128 @v_or_i128(i128 %num, i128 %den) {
+; GCN-LABEL: v_or_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v4
+; GCN-NEXT: v_or_b32_e32 v1, v1, v5
+; GCN-NEXT: v_or_b32_e32 v2, v2, v6
+; GCN-NEXT: v_or_b32_e32 v3, v3, v7
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT: v_or_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_or_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_or_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i128 %num, %den
+ ret i128 %result
+}
+
+define amdgpu_ps <8 x i32> @s_or_i256(i256 inreg %num, i256 inreg %den) {
+; GCN-LABEL: s_or_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GCN-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; GCN-NEXT: s_or_b64 s[6:7], s[6:7], s[14:15]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_or_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
+; GFX10PLUS-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX10PLUS-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; GFX10PLUS-NEXT: s_or_b64 s[6:7], s[6:7], s[14:15]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_or_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
+; GFX12-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX12-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; GFX12-NEXT: s_or_b64 s[6:7], s[6:7], s[14:15]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = or i256 %num, %den
+ %cast = bitcast i256 %result to <8 x i32>
+ ret <8 x i32> %cast
+}
+
+define i256 @v_or_i256(i256 %num, i256 %den) {
+; GCN-LABEL: v_or_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_or_b32_e32 v0, v0, v8
+; GCN-NEXT: v_or_b32_e32 v1, v1, v9
+; GCN-NEXT: v_or_b32_e32 v2, v2, v10
+; GCN-NEXT: v_or_b32_e32 v3, v3, v11
+; GCN-NEXT: v_or_b32_e32 v4, v4, v12
+; GCN-NEXT: v_or_b32_e32 v5, v5, v13
+; GCN-NEXT: v_or_b32_e32 v6, v6, v14
+; GCN-NEXT: v_or_b32_e32 v7, v7, v15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT: v_or_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT: v_or_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT: v_or_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT: v_or_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_or_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_or_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_or_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_or_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_or_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_or_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_or_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = or i256 %num, %den
+ ret i256 %result
+}
+
+define amdgpu_ps void @s_or_u64_zext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_or_u64_zext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v3, 0
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_or_u64_zext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, 0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_or_u64_zext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_or_u64_zext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: v_mov_b32_e32 v3, 0
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_or_u64_zext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_or_u64_zext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: v_mov_b32_e32 v3, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %or = or i64 %ext, 80
+ store i64 %or, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_or_u64_zext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b32 s5, 0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_or_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_or_u64_zext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: s_mov_b32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_or_u64_zext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_mov_b32 s3, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_or_u64_zext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_mov_b32 s3, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_or_u64_zext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_or_u64_zext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %or = or i64 %ext, 80
+ store i64 %or, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @s_or_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_or_u64_sext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX7-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_or_u64_sext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_or_u64_sext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_or_u64_sext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX10-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_or_u64_sext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX11-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_or_u64_sext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX12-NEXT: v_or_b32_e32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %or = or i64 %ext, 80
+ store i64 %or, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_or_u64_sext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_or_u64_sext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_ashr_i32 s5, s4, 31
+; GFX7-NEXT: s_or_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_or_u64_sext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_ashr_i32 s3, s2, 31
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_or_u64_sext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_ashr_i32 s3, s2, 31
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_or_u64_sext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_ashr_i32 s3, s2, 31
+; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_or_u64_sext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_ashr_i32 s3, s2, 31
+; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_or_u64_sext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %or = or i64 %ext, 80
+ store i64 %or, ptr addrspace(1) %out, align 8
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11-FAKE16: {{.*}}
+; GFX11-TRUE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
new file mode 100644
index 0000000000000..b27a35ce0753a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
@@ -0,0 +1,843 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-TRUE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11,GFX11-FAKE16 %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+
+define amdgpu_ps i16 @s_xor_i16(i16 inreg %num, i16 inreg %den) {
+; GCN-LABEL: s_xor_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define i16 @v_xor_i16(i16 %num, i16 %den) {
+; GCN-LABEL: v_xor_i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i16:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps zeroext i16 @s_xor_i16_zeroext(i16 inreg zeroext %num, i16 inreg zeroext %den) {
+; GFX7-LABEL: s_xor_i16_zeroext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_xor_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_xor_i16_zeroext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_xor_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_xor_i16_zeroext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_xor_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define zeroext i16 @v_xor_i16_zeroext(i16 zeroext %num, i16 zeroext %den) {
+; GCN-LABEL: v_xor_i16_zeroext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i16_zeroext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps signext i16 @s_xor_i16_signext(i16 inreg signext %num, i16 inreg signext %den) {
+; GFX7-LABEL: s_xor_i16_signext:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_xor_b32 s0, s0, s1
+; GFX7-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_xor_i16_signext:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_xor_b32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_xor_i16_signext:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_xor_b32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b32 s0, s0, s1
+; GFX10PLUS-NEXT: s_sext_i32_i16 s0, s0
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_sext_i32_i16 s0, s0
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define signext i16 @v_xor_i16_signext(i16 signext %num, i16 signext %den) {
+; GCN-LABEL: v_xor_i16_signext:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i16_signext:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i16 %num, %den
+ ret i16 %result
+}
+
+define amdgpu_ps i32 @s_xor_i32(i32 inreg %num, i32 inreg %den) {
+; GCN-LABEL: s_xor_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b32 s0, s0, s1
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i32 %num, %den
+ ret i32 %result
+}
+
+define i32 @v_xor_i32(i32 %num, i32 %den) {
+; GCN-LABEL: v_xor_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i32 %num, %den
+ ret i32 %result
+}
+
+define amdgpu_ps <2 x i32> @s_xor_v2i32(<2 x i32> inreg %num, <2 x i32> inreg %den) {
+; GCN-LABEL: s_xor_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define <2 x i32> @v_xor_v2i32(<2 x i32> %num, <2 x i32> %den) {
+; GCN-LABEL: v_xor_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v2
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_v2i32:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor <2 x i32> %num, %den
+ ret <2 x i32> %result
+}
+
+define amdgpu_cs i33 @s_xor_i33(i33 inreg %num, i33 inreg %den) {
+; GCN-LABEL: s_xor_i33:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i33:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i33:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i33 %num, %den
+ ret i33 %result
+}
+
+define amdgpu_ps i64 @s_xor_i64(i64 inreg %num, i64 inreg %den) {
+; GCN-LABEL: s_xor_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i64 %num, %den
+ ret i64 %result
+}
+
+define i64 @v_xor_i64(i64 %num, i64 %den) {
+; GCN-LABEL: v_xor_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v2
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX10PLUS-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i64 %num, %den
+ ret i64 %result
+}
+
+define amdgpu_ps <3 x i32> @s_xor_i96(i96 inreg %num, i96 inreg %den) {
+; GCN-LABEL: s_xor_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b32 s6, s3
+; GCN-NEXT: s_mov_b32 s7, s4
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT: s_xor_b32 s2, s2, s5
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_mov_b32 s6, s3
+; GFX10PLUS-NEXT: s_mov_b32 s7, s4
+; GFX10PLUS-NEXT: s_xor_b32 s2, s2, s5
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mov_b32 s6, s3
+; GFX12-NEXT: s_mov_b32 s7, s4
+; GFX12-NEXT: s_xor_b32 s2, s2, s5
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i96 %num, %den
+ %cast = bitcast i96 %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define i96 @v_xor_i96(i96 %num, i96 %den) {
+; GCN-LABEL: v_xor_i96:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v3
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v4
+; GCN-NEXT: v_xor_b32_e32 v2, v2, v5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i96:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX10PLUS-NEXT: v_xor_b32_e32 v1, v1, v4
+; GFX10PLUS-NEXT: v_xor_b32_e32 v2, v2, v5
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v4
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i96 %num, %den
+ ret i96 %result
+}
+
+define amdgpu_ps <4 x i32> @s_xor_i128(i128 inreg %num, i128 inreg %den) {
+; GCN-LABEL: s_xor_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX10PLUS-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i128 %num, %den
+ %cast = bitcast i128 %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+define i128 @v_xor_i128(i128 %num, i128 %den) {
+; GCN-LABEL: v_xor_i128:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v4
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v5
+; GCN-NEXT: v_xor_b32_e32 v2, v2, v6
+; GCN-NEXT: v_xor_b32_e32 v3, v3, v7
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i128:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v4
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v5
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v7
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i128 %num, %den
+ ret i128 %result
+}
+
+define amdgpu_ps <8 x i32> @s_xor_i256(i256 inreg %num, i256 inreg %den) {
+; GCN-LABEL: s_xor_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], s[8:9]
+; GCN-NEXT: s_xor_b64 s[2:3], s[2:3], s[10:11]
+; GCN-NEXT: s_xor_b64 s[4:5], s[4:5], s[12:13]
+; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[14:15]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10PLUS-LABEL: s_xor_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_xor_b64 s[0:1], s[0:1], s[8:9]
+; GFX10PLUS-NEXT: s_xor_b64 s[2:3], s[2:3], s[10:11]
+; GFX10PLUS-NEXT: s_xor_b64 s[4:5], s[4:5], s[12:13]
+; GFX10PLUS-NEXT: s_xor_b64 s[6:7], s[6:7], s[14:15]
+; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_xor_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_xor_b64 s[0:1], s[0:1], s[8:9]
+; GFX12-NEXT: s_xor_b64 s[2:3], s[2:3], s[10:11]
+; GFX12-NEXT: s_xor_b64 s[4:5], s[4:5], s[12:13]
+; GFX12-NEXT: s_xor_b64 s[6:7], s[6:7], s[14:15]
+; GFX12-NEXT: ; return to shader part epilog
+ %result = xor i256 %num, %den
+ %cast = bitcast i256 %result to <8 x i32>
+ ret <8 x i32> %cast
+}
+
+define i256 @v_xor_i256(i256 %num, i256 %den) {
+; GCN-LABEL: v_xor_i256:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v0, v0, v8
+; GCN-NEXT: v_xor_b32_e32 v1, v1, v9
+; GCN-NEXT: v_xor_b32_e32 v2, v2, v10
+; GCN-NEXT: v_xor_b32_e32 v3, v3, v11
+; GCN-NEXT: v_xor_b32_e32 v4, v4, v12
+; GCN-NEXT: v_xor_b32_e32 v5, v5, v13
+; GCN-NEXT: v_xor_b32_e32 v6, v6, v14
+; GCN-NEXT: v_xor_b32_e32 v7, v7, v15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_i256:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v0, v0, v8
+; GFX12-NEXT: v_xor_b32_e32 v1, v1, v9
+; GFX12-NEXT: v_xor_b32_e32 v2, v2, v10
+; GFX12-NEXT: v_xor_b32_e32 v3, v3, v11
+; GFX12-NEXT: v_xor_b32_e32 v4, v4, v12
+; GFX12-NEXT: v_xor_b32_e32 v5, v5, v13
+; GFX12-NEXT: v_xor_b32_e32 v6, v6, v14
+; GFX12-NEXT: v_xor_b32_e32 v7, v7, v15
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %result = xor i256 %num, %den
+ ret i256 %result
+}
+
+define amdgpu_ps void @s_xor_u64_zext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_xor_u64_zext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v3, 0
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_xor_u64_zext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, 0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_xor_u64_zext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_xor_u64_zext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: v_mov_b32_e32 v3, 0
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_xor_u64_zext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_xor_u64_zext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: v_mov_b32_e32 v3, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %xor = xor i64 %ext, 80
+ store i64 %xor, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_xor_u64_zext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_xor_u64_zext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b32 s5, 0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_xor_u64_zext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: s_mov_b32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_xor_u64_zext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_mov_b32 s3, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_xor_u64_zext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_mov_b32 s3, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_xor_u64_zext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_xor_u64_zext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %xor = xor i64 %ext, 80
+ store i64 %xor, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @s_xor_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_xor_u64_sext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX7-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_xor_u64_sext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_xor_u64_sext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX9-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_xor_u64_sext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX10-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_xor_u64_sext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX11-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_xor_u64_sext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX12-NEXT: v_xor_b32_e32 v2, 0x50, v2
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %xor = xor i64 %ext, 80
+ store i64 %xor, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_xor_u64_sext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_xor_u64_sext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_ashr_i32 s5, s4, 31
+; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_xor_u64_sext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_ashr_i32 s3, s2, 31
+; GFX8-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_xor_u64_sext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_ashr_i32 s3, s2, 31
+; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_xor_u64_sext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_ashr_i32 s3, s2, 31
+; GFX10-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_xor_u64_sext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_ashr_i32 s3, s2, 31
+; GFX11-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_xor_u64_sext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_xor_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %xor = xor i64 %ext, 80
+ store i64 %xor, ptr addrspace(1) %out, align 8
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11-FAKE16: {{.*}}
+; GFX11-TRUE16: {{.*}}
More information about the llvm-commits
mailing list