[llvm] 3a5af23 - [GlobalISel][AMDGPU] Fix handling of v2i128 type for AND, OR, XOR (#138574)

via llvm-commits llvm-commits at lists.llvm.org
Thu May 8 10:31:31 PDT 2025


Author: Chinmay Deshpande
Date: 2025-05-08T19:31:28+02:00
New Revision: 3a5af231fd3af4b5890ed28f7792b17e56386ffd

URL: https://github.com/llvm/llvm-project/commit/3a5af231fd3af4b5890ed28f7792b17e56386ffd
DIFF: https://github.com/llvm/llvm-project/commit/3a5af231fd3af4b5890ed28f7792b17e56386ffd.diff

LOG: [GlobalISel][AMDGPU] Fix handling of v2i128 type for AND, OR, XOR (#138574)

Current behavior crashes the compiler.

This bug was found using the AMDGPU Fuzzing project.

Fixes SWDEV-508816.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 9d13fac60efcd..7bb461e0a239f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -872,12 +872,14 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   // Report legal for any types we can handle anywhere. For the cases only legal
   // on the SALU, RegBankSelect will be able to re-legalize.
   getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
-    .legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
-    .clampScalar(0, S32, S64)
-    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
-    .fewerElementsIf(vectorWiderThan(0, 64), fewerEltsToSize64Vector(0))
-    .widenScalarToNextPow2(0)
-    .scalarize(0);
+      .legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
+      .clampScalar(0, S32, S64)
+      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
+      .fewerElementsIf(
+          all(vectorWiderThan(0, 64), scalarOrEltNarrowerThan(0, 64)),
+          fewerEltsToSize64Vector(0))
+      .widenScalarToNextPow2(0)
+      .scalarize(0);
 
   getActionDefinitionsBuilder(
       {G_UADDO, G_USUBO, G_UADDE, G_SADDE, G_USUBE, G_SSUBE})

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
index ed3720a950b38..18578c55697cf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/and.ll
@@ -834,6 +834,300 @@ define amdgpu_kernel void @s_and_u64_sext_with_sregs(ptr addrspace(1) %out, ptr
   store i64 %and, ptr addrspace(1) %out, align 8
   ret void
 }
+
+define <2 x i128> @v_and_v2i128(<2 x i128> %a, <2 x i128> %b) {
+; GCN-LABEL: v_and_v2i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v8
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v9
+; GCN-NEXT:    v_and_b32_e32 v2, v2, v10
+; GCN-NEXT:    v_and_b32_e32 v3, v3, v11
+; GCN-NEXT:    v_and_b32_e32 v4, v4, v12
+; GCN-NEXT:    v_and_b32_e32 v5, v5, v13
+; GCN-NEXT:    v_and_b32_e32 v6, v6, v14
+; GCN-NEXT:    v_and_b32_e32 v7, v7, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_v2i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_and_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT:    v_and_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT:    v_and_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT:    v_and_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT:    v_and_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT:    v_and_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT:    v_and_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT:    v_and_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v2i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_and_b32_e32 v0, v0, v8
+; GFX12-NEXT:    v_and_b32_e32 v1, v1, v9
+; GFX12-NEXT:    v_and_b32_e32 v2, v2, v10
+; GFX12-NEXT:    v_and_b32_e32 v3, v3, v11
+; GFX12-NEXT:    v_and_b32_e32 v4, v4, v12
+; GFX12-NEXT:    v_and_b32_e32 v5, v5, v13
+; GFX12-NEXT:    v_and_b32_e32 v6, v6, v14
+; GFX12-NEXT:    v_and_b32_e32 v7, v7, v15
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %and = and <2 x i128> %a, %b
+  ret <2 x i128> %and
+}
+
+define <2 x i128> @v_and_v2i128_inline_imm(<2 x i128> %a) {
+; GCN-LABEL: v_and_v2i128_inline_imm:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 64, v0
+; GCN-NEXT:    v_and_b32_e32 v4, 64, v4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_and_v2i128_inline_imm:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_and_b32_e32 v0, 64, v0
+; GFX10-NEXT:    v_and_b32_e32 v4, 64, v4
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10-NEXT:    v_mov_b32_e32 v3, 0
+; GFX10-NEXT:    v_mov_b32_e32 v5, 0
+; GFX10-NEXT:    v_mov_b32_e32 v6, 0
+; GFX10-NEXT:    v_mov_b32_e32 v7, 0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_and_v2i128_inline_imm:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 64, v0
+; GFX11-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v4, 64, v4
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, 0
+; GFX11-NEXT:    v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, 0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v2i128_inline_imm:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 64, v0
+; GFX12-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v4, 64, v4
+; GFX12-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, 0
+; GFX12-NEXT:    v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, 0
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %and = and <2 x i128> %a, <i128 64, i128 64>
+  ret <2 x i128> %and
+}
+
+define <3 x i128> @v_and_v3i128(<3 x i128> %a, <3 x i128> %b) {
+; GCN-LABEL: v_and_v3i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v12
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v13
+; GCN-NEXT:    v_and_b32_e32 v2, v2, v14
+; GCN-NEXT:    v_and_b32_e32 v3, v3, v15
+; GCN-NEXT:    v_and_b32_e32 v4, v4, v16
+; GCN-NEXT:    v_and_b32_e32 v5, v5, v17
+; GCN-NEXT:    v_and_b32_e32 v6, v6, v18
+; GCN-NEXT:    v_and_b32_e32 v7, v7, v19
+; GCN-NEXT:    v_and_b32_e32 v8, v8, v20
+; GCN-NEXT:    v_and_b32_e32 v9, v9, v21
+; GCN-NEXT:    v_and_b32_e32 v10, v10, v22
+; GCN-NEXT:    v_and_b32_e32 v11, v11, v23
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_v3i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_and_b32_e32 v0, v0, v12
+; GFX10PLUS-NEXT:    v_and_b32_e32 v1, v1, v13
+; GFX10PLUS-NEXT:    v_and_b32_e32 v2, v2, v14
+; GFX10PLUS-NEXT:    v_and_b32_e32 v3, v3, v15
+; GFX10PLUS-NEXT:    v_and_b32_e32 v4, v4, v16
+; GFX10PLUS-NEXT:    v_and_b32_e32 v5, v5, v17
+; GFX10PLUS-NEXT:    v_and_b32_e32 v6, v6, v18
+; GFX10PLUS-NEXT:    v_and_b32_e32 v7, v7, v19
+; GFX10PLUS-NEXT:    v_and_b32_e32 v8, v8, v20
+; GFX10PLUS-NEXT:    v_and_b32_e32 v9, v9, v21
+; GFX10PLUS-NEXT:    v_and_b32_e32 v10, v10, v22
+; GFX10PLUS-NEXT:    v_and_b32_e32 v11, v11, v23
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v3i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_and_b32_e32 v0, v0, v12
+; GFX12-NEXT:    v_and_b32_e32 v1, v1, v13
+; GFX12-NEXT:    v_and_b32_e32 v2, v2, v14
+; GFX12-NEXT:    v_and_b32_e32 v3, v3, v15
+; GFX12-NEXT:    v_and_b32_e32 v4, v4, v16
+; GFX12-NEXT:    v_and_b32_e32 v5, v5, v17
+; GFX12-NEXT:    v_and_b32_e32 v6, v6, v18
+; GFX12-NEXT:    v_and_b32_e32 v7, v7, v19
+; GFX12-NEXT:    v_and_b32_e32 v8, v8, v20
+; GFX12-NEXT:    v_and_b32_e32 v9, v9, v21
+; GFX12-NEXT:    v_and_b32_e32 v10, v10, v22
+; GFX12-NEXT:    v_and_b32_e32 v11, v11, v23
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %and = and <3 x i128> %a, %b
+  ret <3 x i128> %and
+}
+
+define <1 x i128> @v_and_v1i128(<1 x i128> %a, <1 x i128> %b) {
+; GCN-LABEL: v_and_v1i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_and_b32_e32 v2, v2, v6
+; GCN-NEXT:    v_and_b32_e32 v3, v3, v7
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_and_v1i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_and_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT:    v_and_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT:    v_and_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT:    v_and_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v1i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_and_b32_e32 v0, v0, v4
+; GFX12-NEXT:    v_and_b32_e32 v1, v1, v5
+; GFX12-NEXT:    v_and_b32_e32 v2, v2, v6
+; GFX12-NEXT:    v_and_b32_e32 v3, v3, v7
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %and = and <1 x i128> %a, %b
+  ret <1 x i128> %and
+}
+
+define <2 x i256> @v_and_v2i256(<2 x i256> %a, <2 x i256> %b) {
+; GCN-LABEL: v_and_v2i256:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v16
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v17
+; GCN-NEXT:    v_and_b32_e32 v2, v2, v18
+; GCN-NEXT:    v_and_b32_e32 v3, v3, v19
+; GCN-NEXT:    v_and_b32_e32 v4, v4, v20
+; GCN-NEXT:    v_and_b32_e32 v5, v5, v21
+; GCN-NEXT:    v_and_b32_e32 v6, v6, v22
+; GCN-NEXT:    v_and_b32_e32 v7, v7, v23
+; GCN-NEXT:    v_and_b32_e32 v8, v8, v24
+; GCN-NEXT:    v_and_b32_e32 v9, v9, v25
+; GCN-NEXT:    v_and_b32_e32 v10, v10, v26
+; GCN-NEXT:    v_and_b32_e32 v11, v11, v27
+; GCN-NEXT:    v_and_b32_e32 v12, v12, v28
+; GCN-NEXT:    v_and_b32_e32 v13, v13, v29
+; GCN-NEXT:    v_and_b32_e32 v14, v14, v30
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v15, v15, v16
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_and_v2i256:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT:    v_and_b32_e32 v0, v0, v16
+; GFX10-NEXT:    v_and_b32_e32 v1, v1, v17
+; GFX10-NEXT:    v_and_b32_e32 v2, v2, v18
+; GFX10-NEXT:    v_and_b32_e32 v3, v3, v19
+; GFX10-NEXT:    v_and_b32_e32 v4, v4, v20
+; GFX10-NEXT:    v_and_b32_e32 v5, v5, v21
+; GFX10-NEXT:    v_and_b32_e32 v6, v6, v22
+; GFX10-NEXT:    v_and_b32_e32 v7, v7, v23
+; GFX10-NEXT:    v_and_b32_e32 v8, v8, v24
+; GFX10-NEXT:    v_and_b32_e32 v9, v9, v25
+; GFX10-NEXT:    v_and_b32_e32 v10, v10, v26
+; GFX10-NEXT:    v_and_b32_e32 v11, v11, v27
+; GFX10-NEXT:    v_and_b32_e32 v12, v12, v28
+; GFX10-NEXT:    v_and_b32_e32 v13, v13, v29
+; GFX10-NEXT:    v_and_b32_e32 v14, v14, v30
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_and_b32_e32 v15, v15, v31
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_and_v2i256:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v31, off, s32
+; GFX11-NEXT:    v_and_b32_e32 v0, v0, v16
+; GFX11-NEXT:    v_and_b32_e32 v1, v1, v17
+; GFX11-NEXT:    v_and_b32_e32 v2, v2, v18
+; GFX11-NEXT:    v_and_b32_e32 v3, v3, v19
+; GFX11-NEXT:    v_and_b32_e32 v4, v4, v20
+; GFX11-NEXT:    v_and_b32_e32 v5, v5, v21
+; GFX11-NEXT:    v_and_b32_e32 v6, v6, v22
+; GFX11-NEXT:    v_and_b32_e32 v7, v7, v23
+; GFX11-NEXT:    v_and_b32_e32 v8, v8, v24
+; GFX11-NEXT:    v_and_b32_e32 v9, v9, v25
+; GFX11-NEXT:    v_and_b32_e32 v10, v10, v26
+; GFX11-NEXT:    v_and_b32_e32 v11, v11, v27
+; GFX11-NEXT:    v_and_b32_e32 v12, v12, v28
+; GFX11-NEXT:    v_and_b32_e32 v13, v13, v29
+; GFX11-NEXT:    v_and_b32_e32 v14, v14, v30
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_and_b32_e32 v15, v15, v31
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_and_v2i256:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v31, off, s32
+; GFX12-NEXT:    v_and_b32_e32 v0, v0, v16
+; GFX12-NEXT:    v_and_b32_e32 v1, v1, v17
+; GFX12-NEXT:    v_and_b32_e32 v2, v2, v18
+; GFX12-NEXT:    v_and_b32_e32 v3, v3, v19
+; GFX12-NEXT:    v_and_b32_e32 v4, v4, v20
+; GFX12-NEXT:    v_and_b32_e32 v5, v5, v21
+; GFX12-NEXT:    v_and_b32_e32 v6, v6, v22
+; GFX12-NEXT:    v_and_b32_e32 v7, v7, v23
+; GFX12-NEXT:    v_and_b32_e32 v8, v8, v24
+; GFX12-NEXT:    v_and_b32_e32 v9, v9, v25
+; GFX12-NEXT:    v_and_b32_e32 v10, v10, v26
+; GFX12-NEXT:    v_and_b32_e32 v11, v11, v27
+; GFX12-NEXT:    v_and_b32_e32 v12, v12, v28
+; GFX12-NEXT:    v_and_b32_e32 v13, v13, v29
+; GFX12-NEXT:    v_and_b32_e32 v14, v14, v30
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    v_and_b32_e32 v15, v15, v31
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %and = and <2 x i256> %a, %b
+  ret <2 x i256> %and
+}
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX11-FAKE16: {{.*}}
 ; GFX11-TRUE16: {{.*}}

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
index df034d82118b1..af377b1d76817 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
@@ -838,6 +838,277 @@ define amdgpu_kernel void @s_or_u64_sext_with_sregs(ptr addrspace(1) %out, ptr a
   store i64 %or, ptr addrspace(1) %out, align 8
   ret void
 }
+
+define <2 x i128> @v_or_v2i128(<2 x i128> %a, <2 x i128> %b) {
+; GCN-LABEL: v_or_v2i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v8
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v9
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v10
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v11
+; GCN-NEXT:    v_or_b32_e32 v4, v4, v12
+; GCN-NEXT:    v_or_b32_e32 v5, v5, v13
+; GCN-NEXT:    v_or_b32_e32 v6, v6, v14
+; GCN-NEXT:    v_or_b32_e32 v7, v7, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_v2i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v2i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v10
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v11
+; GFX12-NEXT:    v_or_b32_e32 v4, v4, v12
+; GFX12-NEXT:    v_or_b32_e32 v5, v5, v13
+; GFX12-NEXT:    v_or_b32_e32 v6, v6, v14
+; GFX12-NEXT:    v_or_b32_e32 v7, v7, v15
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i128> %a, %b
+  ret <2 x i128> %or
+}
+
+define <2 x i128> @v_or_v2i128_inline_imm(<2 x i128> %a) {
+; GCN-LABEL: v_or_v2i128_inline_imm:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, 64, v0
+; GCN-NEXT:    v_or_b32_e32 v4, 64, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_v2i128_inline_imm:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_or_b32_e32 v0, 64, v0
+; GFX10PLUS-NEXT:    v_or_b32_e32 v4, 64, v4
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v2i128_inline_imm:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, 64, v0
+; GFX12-NEXT:    v_or_b32_e32 v4, 64, v4
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i128> %a, <i128 64, i128 64>
+  ret <2 x i128> %or
+}
+
+define <3 x i128> @v_or_v3i128(<3 x i128> %a, <3 x i128> %b) {
+; GCN-LABEL: v_or_v3i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v12
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v13
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v14
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v15
+; GCN-NEXT:    v_or_b32_e32 v4, v4, v16
+; GCN-NEXT:    v_or_b32_e32 v5, v5, v17
+; GCN-NEXT:    v_or_b32_e32 v6, v6, v18
+; GCN-NEXT:    v_or_b32_e32 v7, v7, v19
+; GCN-NEXT:    v_or_b32_e32 v8, v8, v20
+; GCN-NEXT:    v_or_b32_e32 v9, v9, v21
+; GCN-NEXT:    v_or_b32_e32 v10, v10, v22
+; GCN-NEXT:    v_or_b32_e32 v11, v11, v23
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_v3i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_or_b32_e32 v0, v0, v12
+; GFX10PLUS-NEXT:    v_or_b32_e32 v1, v1, v13
+; GFX10PLUS-NEXT:    v_or_b32_e32 v2, v2, v14
+; GFX10PLUS-NEXT:    v_or_b32_e32 v3, v3, v15
+; GFX10PLUS-NEXT:    v_or_b32_e32 v4, v4, v16
+; GFX10PLUS-NEXT:    v_or_b32_e32 v5, v5, v17
+; GFX10PLUS-NEXT:    v_or_b32_e32 v6, v6, v18
+; GFX10PLUS-NEXT:    v_or_b32_e32 v7, v7, v19
+; GFX10PLUS-NEXT:    v_or_b32_e32 v8, v8, v20
+; GFX10PLUS-NEXT:    v_or_b32_e32 v9, v9, v21
+; GFX10PLUS-NEXT:    v_or_b32_e32 v10, v10, v22
+; GFX10PLUS-NEXT:    v_or_b32_e32 v11, v11, v23
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v3i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v12
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v13
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v14
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v15
+; GFX12-NEXT:    v_or_b32_e32 v4, v4, v16
+; GFX12-NEXT:    v_or_b32_e32 v5, v5, v17
+; GFX12-NEXT:    v_or_b32_e32 v6, v6, v18
+; GFX12-NEXT:    v_or_b32_e32 v7, v7, v19
+; GFX12-NEXT:    v_or_b32_e32 v8, v8, v20
+; GFX12-NEXT:    v_or_b32_e32 v9, v9, v21
+; GFX12-NEXT:    v_or_b32_e32 v10, v10, v22
+; GFX12-NEXT:    v_or_b32_e32 v11, v11, v23
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <3 x i128> %a, %b
+  ret <3 x i128> %or
+}
+
+define <1 x i128> @v_or_v1i128(<1 x i128> %a, <1 x i128> %b) {
+; GCN-LABEL: v_or_v1i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v6
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v7
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_or_v1i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v1i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v4
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v5
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <1 x i128> %a, %b
+  ret <1 x i128> %or
+}
+
+define <2 x i256> @v_or_v2i256(<2 x i256> %a, <2 x i256> %b) {
+; GCN-LABEL: v_or_v2i256:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v16
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v17
+; GCN-NEXT:    v_or_b32_e32 v2, v2, v18
+; GCN-NEXT:    v_or_b32_e32 v3, v3, v19
+; GCN-NEXT:    v_or_b32_e32 v4, v4, v20
+; GCN-NEXT:    v_or_b32_e32 v5, v5, v21
+; GCN-NEXT:    v_or_b32_e32 v6, v6, v22
+; GCN-NEXT:    v_or_b32_e32 v7, v7, v23
+; GCN-NEXT:    v_or_b32_e32 v8, v8, v24
+; GCN-NEXT:    v_or_b32_e32 v9, v9, v25
+; GCN-NEXT:    v_or_b32_e32 v10, v10, v26
+; GCN-NEXT:    v_or_b32_e32 v11, v11, v27
+; GCN-NEXT:    v_or_b32_e32 v12, v12, v28
+; GCN-NEXT:    v_or_b32_e32 v13, v13, v29
+; GCN-NEXT:    v_or_b32_e32 v14, v14, v30
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v15, v15, v16
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_or_v2i256:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT:    v_or_b32_e32 v0, v0, v16
+; GFX10-NEXT:    v_or_b32_e32 v1, v1, v17
+; GFX10-NEXT:    v_or_b32_e32 v2, v2, v18
+; GFX10-NEXT:    v_or_b32_e32 v3, v3, v19
+; GFX10-NEXT:    v_or_b32_e32 v4, v4, v20
+; GFX10-NEXT:    v_or_b32_e32 v5, v5, v21
+; GFX10-NEXT:    v_or_b32_e32 v6, v6, v22
+; GFX10-NEXT:    v_or_b32_e32 v7, v7, v23
+; GFX10-NEXT:    v_or_b32_e32 v8, v8, v24
+; GFX10-NEXT:    v_or_b32_e32 v9, v9, v25
+; GFX10-NEXT:    v_or_b32_e32 v10, v10, v26
+; GFX10-NEXT:    v_or_b32_e32 v11, v11, v27
+; GFX10-NEXT:    v_or_b32_e32 v12, v12, v28
+; GFX10-NEXT:    v_or_b32_e32 v13, v13, v29
+; GFX10-NEXT:    v_or_b32_e32 v14, v14, v30
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_or_b32_e32 v15, v15, v31
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_or_v2i256:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v31, off, s32
+; GFX11-NEXT:    v_or_b32_e32 v0, v0, v16
+; GFX11-NEXT:    v_or_b32_e32 v1, v1, v17
+; GFX11-NEXT:    v_or_b32_e32 v2, v2, v18
+; GFX11-NEXT:    v_or_b32_e32 v3, v3, v19
+; GFX11-NEXT:    v_or_b32_e32 v4, v4, v20
+; GFX11-NEXT:    v_or_b32_e32 v5, v5, v21
+; GFX11-NEXT:    v_or_b32_e32 v6, v6, v22
+; GFX11-NEXT:    v_or_b32_e32 v7, v7, v23
+; GFX11-NEXT:    v_or_b32_e32 v8, v8, v24
+; GFX11-NEXT:    v_or_b32_e32 v9, v9, v25
+; GFX11-NEXT:    v_or_b32_e32 v10, v10, v26
+; GFX11-NEXT:    v_or_b32_e32 v11, v11, v27
+; GFX11-NEXT:    v_or_b32_e32 v12, v12, v28
+; GFX11-NEXT:    v_or_b32_e32 v13, v13, v29
+; GFX11-NEXT:    v_or_b32_e32 v14, v14, v30
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_or_b32_e32 v15, v15, v31
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_or_v2i256:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v31, off, s32
+; GFX12-NEXT:    v_or_b32_e32 v0, v0, v16
+; GFX12-NEXT:    v_or_b32_e32 v1, v1, v17
+; GFX12-NEXT:    v_or_b32_e32 v2, v2, v18
+; GFX12-NEXT:    v_or_b32_e32 v3, v3, v19
+; GFX12-NEXT:    v_or_b32_e32 v4, v4, v20
+; GFX12-NEXT:    v_or_b32_e32 v5, v5, v21
+; GFX12-NEXT:    v_or_b32_e32 v6, v6, v22
+; GFX12-NEXT:    v_or_b32_e32 v7, v7, v23
+; GFX12-NEXT:    v_or_b32_e32 v8, v8, v24
+; GFX12-NEXT:    v_or_b32_e32 v9, v9, v25
+; GFX12-NEXT:    v_or_b32_e32 v10, v10, v26
+; GFX12-NEXT:    v_or_b32_e32 v11, v11, v27
+; GFX12-NEXT:    v_or_b32_e32 v12, v12, v28
+; GFX12-NEXT:    v_or_b32_e32 v13, v13, v29
+; GFX12-NEXT:    v_or_b32_e32 v14, v14, v30
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    v_or_b32_e32 v15, v15, v31
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %or = or <2 x i256> %a, %b
+  ret <2 x i256> %or
+}
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX11-FAKE16: {{.*}}
 ; GFX11-TRUE16: {{.*}}

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
index b27a35ce0753a..4755da1392684 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/xor.ll
@@ -838,6 +838,277 @@ define amdgpu_kernel void @s_xor_u64_sext_with_sregs(ptr addrspace(1) %out, ptr
   store i64 %xor, ptr addrspace(1) %out, align 8
   ret void
 }
+
+define <2 x i128> @v_xor_v2i128(<2 x i128> %a, <2 x i128> %b) {
+; GCN-LABEL: v_xor_v2i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GCN-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GCN-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GCN-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GCN-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GCN-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GCN-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GCN-NEXT:    v_xor_b32_e32 v7, v7, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_v2i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v7, v7, v15
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v2i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GFX12-NEXT:    v_xor_b32_e32 v1, v1, v9
+; GFX12-NEXT:    v_xor_b32_e32 v2, v2, v10
+; GFX12-NEXT:    v_xor_b32_e32 v3, v3, v11
+; GFX12-NEXT:    v_xor_b32_e32 v4, v4, v12
+; GFX12-NEXT:    v_xor_b32_e32 v5, v5, v13
+; GFX12-NEXT:    v_xor_b32_e32 v6, v6, v14
+; GFX12-NEXT:    v_xor_b32_e32 v7, v7, v15
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %xor = xor <2 x i128> %a, %b
+  ret <2 x i128> %xor
+}
+
+define <2 x i128> @v_xor_v2i128_inline_imm(<2 x i128> %a) {
+; GCN-LABEL: v_xor_v2i128_inline_imm:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, 64, v0
+; GCN-NEXT:    v_xor_b32_e32 v4, 64, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_v2i128_inline_imm:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, 64, v0
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v4, 64, v4
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v2i128_inline_imm:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v0, 64, v0
+; GFX12-NEXT:    v_xor_b32_e32 v4, 64, v4
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %xor = xor <2 x i128> %a, <i128 64, i128 64>
+  ret <2 x i128> %xor
+}
+
+define <3 x i128> @v_xor_v3i128(<3 x i128> %a, <3 x i128> %b) {
+; GCN-LABEL: v_xor_v3i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-NEXT:    v_xor_b32_e32 v2, v2, v14
+; GCN-NEXT:    v_xor_b32_e32 v3, v3, v15
+; GCN-NEXT:    v_xor_b32_e32 v4, v4, v16
+; GCN-NEXT:    v_xor_b32_e32 v5, v5, v17
+; GCN-NEXT:    v_xor_b32_e32 v6, v6, v18
+; GCN-NEXT:    v_xor_b32_e32 v7, v7, v19
+; GCN-NEXT:    v_xor_b32_e32 v8, v8, v20
+; GCN-NEXT:    v_xor_b32_e32 v9, v9, v21
+; GCN-NEXT:    v_xor_b32_e32 v10, v10, v22
+; GCN-NEXT:    v_xor_b32_e32 v11, v11, v23
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_v3i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v2, v2, v14
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v3, v3, v15
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v4, v4, v16
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v5, v5, v17
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v6, v6, v18
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v7, v7, v19
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v8, v8, v20
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v9, v9, v21
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v10, v10, v22
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v11, v11, v23
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v3i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GFX12-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GFX12-NEXT:    v_xor_b32_e32 v2, v2, v14
+; GFX12-NEXT:    v_xor_b32_e32 v3, v3, v15
+; GFX12-NEXT:    v_xor_b32_e32 v4, v4, v16
+; GFX12-NEXT:    v_xor_b32_e32 v5, v5, v17
+; GFX12-NEXT:    v_xor_b32_e32 v6, v6, v18
+; GFX12-NEXT:    v_xor_b32_e32 v7, v7, v19
+; GFX12-NEXT:    v_xor_b32_e32 v8, v8, v20
+; GFX12-NEXT:    v_xor_b32_e32 v9, v9, v21
+; GFX12-NEXT:    v_xor_b32_e32 v10, v10, v22
+; GFX12-NEXT:    v_xor_b32_e32 v11, v11, v23
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %xor = xor <3 x i128> %a, %b
+  ret <3 x i128> %xor
+}
+
+define <1 x i128> @v_xor_v1i128(<1 x i128> %a, <1 x i128> %b) {
+; GCN-LABEL: v_xor_v1i128:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GCN-NEXT:    v_xor_b32_e32 v3, v3, v7
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10PLUS-LABEL: v_xor_v1i128:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GFX10PLUS-NEXT:    v_xor_b32_e32 v3, v3, v7
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v1i128:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GFX12-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GFX12-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GFX12-NEXT:    v_xor_b32_e32 v3, v3, v7
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %xor = xor <1 x i128> %a, %b
+  ret <1 x i128> %xor
+}
+
+define <2 x i256> @v_xor_v2i256(<2 x i256> %a, <2 x i256> %b) {
+; GCN-LABEL: v_xor_v2i256:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GCN-NEXT:    buffer_load_dword v16, off, s[0:3], s32
+; GCN-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GCN-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GCN-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GCN-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GCN-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GCN-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GCN-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GCN-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GCN-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GCN-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GCN-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GCN-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GCN-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GCN-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v15, v15, v16
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_xor_v2i256:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX10-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX10-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX10-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX10-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX10-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX10-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX10-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX10-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX10-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX10-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX10-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX10-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX10-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX10-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_xor_v2i256:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_b32 v31, off, s32
+; GFX11-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX11-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX11-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX11-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX11-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX11-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX11-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX11-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX11-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX11-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX11-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX11-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX11-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX11-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX11-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_xor_v2i256:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    scratch_load_b32 v31, off, s32
+; GFX12-NEXT:    v_xor_b32_e32 v0, v0, v16
+; GFX12-NEXT:    v_xor_b32_e32 v1, v1, v17
+; GFX12-NEXT:    v_xor_b32_e32 v2, v2, v18
+; GFX12-NEXT:    v_xor_b32_e32 v3, v3, v19
+; GFX12-NEXT:    v_xor_b32_e32 v4, v4, v20
+; GFX12-NEXT:    v_xor_b32_e32 v5, v5, v21
+; GFX12-NEXT:    v_xor_b32_e32 v6, v6, v22
+; GFX12-NEXT:    v_xor_b32_e32 v7, v7, v23
+; GFX12-NEXT:    v_xor_b32_e32 v8, v8, v24
+; GFX12-NEXT:    v_xor_b32_e32 v9, v9, v25
+; GFX12-NEXT:    v_xor_b32_e32 v10, v10, v26
+; GFX12-NEXT:    v_xor_b32_e32 v11, v11, v27
+; GFX12-NEXT:    v_xor_b32_e32 v12, v12, v28
+; GFX12-NEXT:    v_xor_b32_e32 v13, v13, v29
+; GFX12-NEXT:    v_xor_b32_e32 v14, v14, v30
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    v_xor_b32_e32 v15, v15, v31
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %xor = xor <2 x i256> %a, %b
+  ret <2 x i256> %xor
+}
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX11-FAKE16: {{.*}}
 ; GFX11-TRUE16: {{.*}}


        


More information about the llvm-commits mailing list