[llvm-branch-commits] [llvm] 2d69280 - AMDGPU: Always custom lower extract_subvector

Tobias Hieta via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Thu Jul 27 06:51:57 PDT 2023


Author: Matt Arsenault
Date: 2023-07-27T15:51:32+02:00
New Revision: 2d69280660800324f774d400dcf13282de16671f

URL: https://github.com/llvm/llvm-project/commit/2d69280660800324f774d400dcf13282de16671f
DIFF: https://github.com/llvm/llvm-project/commit/2d69280660800324f774d400dcf13282de16671f.diff

LOG: AMDGPU: Always custom lower extract_subvector

The patterns were ripped out in
a4a3ac10cb1a40ccebed4e81cd7e94f1eb71602d, so this always needs to be
custom lowered. Writing tests for these cases is painfully difficult;
I have no doubt there are more of these hidden.

Fixes #64142

(cherry picked from commit 95e5a461f52f9046bc7a06d70812b2bec509a432)
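
For context, the failure reduces to extracting a contiguous subvector
from a wider vector, which is the shape of the tests added below. A
minimal IR sketch of the pattern (the function name is illustrative;
simplified from the added tests):

    define <8 x i16> @extract_lo_half(<16 x i16> %v) {
      %sub = shufflevector <16 x i16> %v, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
      ret <8 x i16> %sub
    }

SelectionDAG can turn a shuffle that takes consecutive elements into an
EXTRACT_SUBVECTOR node, so with the selection patterns gone such code
fails to select unless the node is marked Custom and lowered by the
target.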

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3148f49ff0d530..b7b90e23e8951d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -278,10 +278,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
       case ISD::UNDEF:
       case ISD::EXTRACT_VECTOR_ELT:
       case ISD::INSERT_VECTOR_ELT:
-      case ISD::EXTRACT_SUBVECTOR:
       case ISD::SCALAR_TO_VECTOR:
       case ISD::IS_FPCLASS:
         break;
+      case ISD::EXTRACT_SUBVECTOR:
       case ISD::INSERT_SUBVECTOR:
       case ISD::CONCAT_VECTORS:
         setOperationAction(Op, VT, Custom);
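
After this change, the relevant switch in the SITargetLowering
constructor reads roughly as follows (a simplified reading of the
resulting code, with the surrounding loop and remainder elided as in
the hunk above). The point is that ISD::EXTRACT_SUBVECTOR now falls
through into the group that requests a Custom action instead of
sitting in the group that breaks out and relied on the removed
patterns:

      case ISD::UNDEF:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::SCALAR_TO_VECTOR:
      case ISD::IS_FPCLASS:
        break;
      case ISD::EXTRACT_SUBVECTOR:   // now falls through to Custom
      case ISD::INSERT_SUBVECTOR:
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);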

diff --git a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
index c6b715e0778120..7c469c9f4ccae4 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
@@ -1198,3 +1198,576 @@ define <8 x i16> @large_vector(ptr addrspace(3) %p, i32 %idxp) {
   %z.3 = shufflevector <8 x i16> %z.2, <8 x i16> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
   ret <8 x i16> %z.3
 }
+
+define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addrspace(1) %p0, ptr addrspace(1) %p1) {
+; SI-LABEL: vec_16xi16_extract_8xi16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_load_ubyte v4, off, s[0:3], s32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v4, 1, v4
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT:    s_and_b64 s[34:35], vcc, exec
+; SI-NEXT:    s_mov_b32 s38, 0
+; SI-NEXT:    s_cbranch_scc0 .LBB7_2
+; SI-NEXT:  ; %bb.1: ; %F
+; SI-NEXT:    s_mov_b32 s39, 0xf000
+; SI-NEXT:    s_mov_b32 s36, s38
+; SI-NEXT:    s_mov_b32 s37, s38
+; SI-NEXT:    buffer_load_ushort v8, v[2:3], s[36:39], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v5, v[2:3], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[2:3], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v4, v[2:3], s[36:39], 0 addr64 offset:6 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v9, v[2:3], s[36:39], 0 addr64 offset:8 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v6, v[2:3], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v11, v[2:3], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v7, v[2:3], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:18 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:20 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:22 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:24 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:26 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:28 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v2, v[2:3], s[36:39], 0 addr64 offset:30 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v12, 16, v6
+; SI-NEXT:    v_lshlrev_b32_e32 v13, 16, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v14, 16, v5
+; SI-NEXT:    v_or_b32_e32 v3, v11, v2
+; SI-NEXT:    v_or_b32_e32 v9, v9, v12
+; SI-NEXT:    v_or_b32_e32 v2, v10, v13
+; SI-NEXT:    v_or_b32_e32 v8, v8, v14
+; SI-NEXT:    s_mov_b64 vcc, exec
+; SI-NEXT:    s_cbranch_execz .LBB7_3
+; SI-NEXT:    s_branch .LBB7_4
+; SI-NEXT:  .LBB7_2:
+; SI-NEXT:    ; implicit-def: $vgpr8
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    ; implicit-def: $vgpr4
+; SI-NEXT:    ; implicit-def: $vgpr9
+; SI-NEXT:    ; implicit-def: $vgpr6
+; SI-NEXT:    ; implicit-def: $vgpr3
+; SI-NEXT:    ; implicit-def: $vgpr7
+; SI-NEXT:    s_mov_b64 vcc, 0
+; SI-NEXT:  .LBB7_3: ; %T
+; SI-NEXT:    s_mov_b32 s39, 0xf000
+; SI-NEXT:    s_mov_b32 s36, s38
+; SI-NEXT:    s_mov_b32 s37, s38
+; SI-NEXT:    buffer_load_ushort v8, v[0:1], s[36:39], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v5, v[0:1], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v2, v[0:1], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v4, v[0:1], s[36:39], 0 addr64 offset:6 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v9, v[0:1], s[36:39], 0 addr64 offset:8 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v6, v[0:1], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v3, v[0:1], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v7, v[0:1], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:18 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:20 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:22 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:24 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:26 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:28 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v0, v[0:1], s[36:39], 0 addr64 offset:30 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v6
+; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v5
+; SI-NEXT:    v_or_b32_e32 v3, v3, v0
+; SI-NEXT:    v_or_b32_e32 v9, v9, v1
+; SI-NEXT:    v_or_b32_e32 v2, v2, v10
+; SI-NEXT:    v_or_b32_e32 v8, v8, v11
+; SI-NEXT:  .LBB7_4: ; %exit
+; SI-NEXT:    v_and_b32_e32 v0, 0xffff, v8
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff, v5
+; SI-NEXT:    v_and_b32_e32 v5, 0xffff, v9
+; SI-NEXT:    v_and_b32_e32 v6, 0xffff, v6
+; SI-NEXT:    v_and_b32_e32 v3, 0xffff, v3
+; SI-NEXT:    v_and_b32_e32 v7, 0xffff, v7
+; SI-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT:    v_and_b32_e32 v4, 0xffff, v4
+; SI-NEXT:    s_movk_i32 s34, 0x3800
+; SI-NEXT:    v_mov_b32_e32 v8, 0x3d00
+; SI-NEXT:    v_mov_b32_e32 v9, 0x3900
+; SI-NEXT:    v_mov_b32_e32 v10, 0x3d000000
+; SI-NEXT:    v_mov_b32_e32 v11, 0x39000000
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v8, v9, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v10, v11, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v5
+; SI-NEXT:    v_cndmask_b32_e32 v5, v8, v9, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v6
+; SI-NEXT:    v_cndmask_b32_e32 v12, v10, v11, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v3
+; SI-NEXT:    v_cndmask_b32_e32 v3, v8, v9, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v7
+; SI-NEXT:    v_cndmask_b32_e32 v7, v10, v11, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v2
+; SI-NEXT:    v_cndmask_b32_e32 v2, v8, v9, vcc
+; SI-NEXT:    v_cmp_lt_u32_e32 vcc, s34, v4
+; SI-NEXT:    v_cndmask_b32_e32 v8, v10, v11, vcc
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_or_b32_e32 v4, v5, v12
+; SI-NEXT:    v_or_b32_e32 v6, v3, v7
+; SI-NEXT:    v_or_b32_e32 v2, v2, v8
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v8
+; SI-NEXT:    v_alignbit_b32 v1, v2, v1, 16
+; SI-NEXT:    v_alignbit_b32 v5, v6, v12, 16
+; SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: vec_16xi16_extract_8xi16_0:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_ubyte v4, off, s[0:3], s32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT:    s_and_b64 s[34:35], vcc, exec
+; GFX9-NEXT:    s_cbranch_scc0 .LBB7_2
+; GFX9-NEXT:  ; %bb.1: ; %F
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; kill: killed $vgpr2 killed $vgpr3
+; GFX9-NEXT:    s_cbranch_execz .LBB7_3
+; GFX9-NEXT:    s_branch .LBB7_4
+; GFX9-NEXT:  .LBB7_2:
+; GFX9-NEXT:    ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
+; GFX9-NEXT:  .LBB7_3: ; %T
+; GFX9-NEXT:    global_load_dwordx4 v[2:5], v[0:1], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; kill: killed $vgpr0 killed $vgpr1
+; GFX9-NEXT:  .LBB7_4: ; %exit
+; GFX9-NEXT:    s_movk_i32 s35, 0x3801
+; GFX9-NEXT:    s_movk_i32 s34, 0x3800
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3900
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0x3d00
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_gt_u16_e32 vcc, s35, v7
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_u16_sdwa vcc, v7, s34 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v7, v1, v0, vcc
+; GFX9-NEXT:    v_cmp_gt_u16_e32 vcc, s35, v6
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_lt_u16_sdwa vcc, v6, s35 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v6, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_u16_e32 vcc, s35, v5
+; GFX9-NEXT:    v_cndmask_b32_e32 v8, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_lt_u16_sdwa vcc, v5, s35 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_u16_e32 vcc, s35, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v9, v0, v1, vcc
+; GFX9-NEXT:    v_cmp_lt_u16_sdwa vcc, v4, s35 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT:    s_mov_b32 s34, 0x5040100
+; GFX9-NEXT:    v_perm_b32 v0, v0, v9, s34
+; GFX9-NEXT:    v_perm_b32 v1, v5, v8, s34
+; GFX9-NEXT:    v_perm_b32 v2, v6, v2, s34
+; GFX9-NEXT:    v_perm_b32 v3, v7, v3, s34
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: vec_16xi16_extract_8xi16_0:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_u8 v4, off, s32
+; GFX11-NEXT:    s_mov_b32 s0, 0
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v4
+; GFX11-NEXT:    s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT:    s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT:  ; %bb.1: ; %F
+; GFX11-NEXT:    global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    global_load_b128 v[2:5], v[2:3], off glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT:    s_cbranch_vccz .LBB7_3
+; GFX11-NEXT:    s_branch .LBB7_4
+; GFX11-NEXT:  .LBB7_2:
+; GFX11-NEXT:    ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+; GFX11-NEXT:  .LBB7_3: ; %T
+; GFX11-NEXT:    global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    global_load_b128 v[2:5], v[0:1], off glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:  .LBB7_4: ; %exit
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v5
+; GFX11-NEXT:    v_mov_b32_e32 v9, 0x3900
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0x3d00
+; GFX11-NEXT:    v_lshrrev_b32_e32 v7, 16, v4
+; GFX11-NEXT:    v_lshrrev_b32_e32 v8, 16, v5
+; GFX11-NEXT:    v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-NEXT:    v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v4
+; GFX11-NEXT:    v_cndmask_b32_e32 v4, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v7
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v3
+; GFX11-NEXT:    v_cndmask_b32_e32 v3, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v2
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v0
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 0x3801, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_perm_b32 v0, v0, v2, 0x5040100
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_lt_u16_e32 vcc_lo, 0x3800, v8
+; GFX11-NEXT:    v_perm_b32 v2, v7, v4, 0x5040100
+; GFX11-NEXT:    v_perm_b32 v1, v1, v3, 0x5040100
+; GFX11-NEXT:    v_cndmask_b32_e32 v6, 0x3d00, v9, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_perm_b32 v3, v6, v5, 0x5040100
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+  br i1 %cond, label %T, label %F
+
+T:
+  %t = load volatile <16 x i16>, ptr addrspace(1) %p0
+  br label %exit
+
+F:
+  %f = load volatile <16 x i16>, ptr addrspace(1) %p1
+  br label %exit
+
+exit:
+  %m = phi <16 x i16> [ %t, %T ], [ %f, %F ]
+  %v2 = shufflevector <16 x i16> %m, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %b2 = icmp ugt <8 x i16> %v2, <i16 u0x3800, i16 u0x3800, i16 u0x3800, i16 u0x3800, i16 u0x3800, i16 u0x3800, i16 u0x3800, i16 u0x3800>
+  %r2 = select <8 x i1> %b2, <8 x i16> <i16 u0x3900, i16 u0x3900, i16 u0x3900, i16 u0x3900, i16 u0x3900, i16 u0x3900, i16 u0x3900, i16 u0x3900>, <8 x i16> <i16 u0x3D00, i16 u0x3D00, i16 u0x3D00, i16 u0x3D00, i16 u0x3D00, i16 u0x3D00, i16 u0x3D00, i16 u0x3D00>
+  ret <8 x i16> %r2
+}
+
+define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr addrspace(1) %p0, ptr addrspace(1) %p1) {
+; SI-LABEL: vec_16xf16_extract_8xf16_0:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    buffer_load_ubyte v4, off, s[0:3], s32
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v4, 1, v4
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; SI-NEXT:    s_and_b64 s[34:35], vcc, exec
+; SI-NEXT:    s_mov_b32 s38, 0
+; SI-NEXT:    s_cbranch_scc0 .LBB8_2
+; SI-NEXT:  ; %bb.1: ; %F
+; SI-NEXT:    s_mov_b32 s39, 0xf000
+; SI-NEXT:    s_mov_b32 s36, s38
+; SI-NEXT:    s_mov_b32 s37, s38
+; SI-NEXT:    buffer_load_ushort v5, v[2:3], s[36:39], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v6, v[2:3], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v8, v[2:3], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v4, v[2:3], s[36:39], 0 addr64 offset:6 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v9, v[2:3], s[36:39], 0 addr64 offset:8 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v7, v[2:3], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[2:3], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v11, v[2:3], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:18 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:20 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:22 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:24 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:26 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v12, v[2:3], s[36:39], 0 addr64 offset:28 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v2, v[2:3], s[36:39], 0 addr64 offset:30 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v12, 16, v11
+; SI-NEXT:    v_lshlrev_b32_e32 v13, 16, v7
+; SI-NEXT:    v_lshlrev_b32_e32 v14, 16, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v15, 16, v6
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v11
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, v6
+; SI-NEXT:    v_or_b32_e32 v6, v10, v12
+; SI-NEXT:    v_or_b32_e32 v9, v9, v13
+; SI-NEXT:    v_or_b32_e32 v8, v8, v14
+; SI-NEXT:    v_or_b32_e32 v10, v5, v15
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v6
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v10
+; SI-NEXT:    s_mov_b64 vcc, exec
+; SI-NEXT:    s_cbranch_execz .LBB8_3
+; SI-NEXT:    s_branch .LBB8_4
+; SI-NEXT:  .LBB8_2:
+; SI-NEXT:    ; implicit-def: $vgpr9
+; SI-NEXT:    ; implicit-def: $vgpr7
+; SI-NEXT:    ; implicit-def: $vgpr8
+; SI-NEXT:    ; implicit-def: $vgpr4
+; SI-NEXT:    ; implicit-def: $vgpr6
+; SI-NEXT:    ; implicit-def: $vgpr3
+; SI-NEXT:    ; implicit-def: $vgpr5
+; SI-NEXT:    ; implicit-def: $vgpr2
+; SI-NEXT:    s_mov_b64 vcc, 0
+; SI-NEXT:  .LBB8_3: ; %T
+; SI-NEXT:    s_mov_b32 s39, 0xf000
+; SI-NEXT:    s_mov_b32 s36, s38
+; SI-NEXT:    s_mov_b32 s37, s38
+; SI-NEXT:    buffer_load_ushort v5, v[0:1], s[36:39], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v7, v[0:1], s[36:39], 0 addr64 offset:2 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v6, v[0:1], s[36:39], 0 addr64 offset:4 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v4, v[0:1], s[36:39], 0 addr64 offset:6 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v8, v[0:1], s[36:39], 0 addr64 offset:8 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v3, v[0:1], s[36:39], 0 addr64 offset:10 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v9, v[0:1], s[36:39], 0 addr64 offset:12 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v2, v[0:1], s[36:39], 0 addr64 offset:14 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:16 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:18 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:20 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:22 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:24 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:26 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v10, v[0:1], s[36:39], 0 addr64 offset:28 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_ushort v0, v[0:1], s[36:39], 0 addr64 offset:30 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v10, 16, v4
+; SI-NEXT:    v_lshlrev_b32_e32 v11, 16, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_or_b32_e32 v0, v9, v0
+; SI-NEXT:    v_or_b32_e32 v1, v8, v1
+; SI-NEXT:    v_or_b32_e32 v8, v6, v10
+; SI-NEXT:    v_or_b32_e32 v9, v5, v11
+; SI-NEXT:    v_cvt_f32_f16_e32 v5, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT:    v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT:  .LBB8_4: ; %exit
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v9
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v7
+; SI-NEXT:    v_cvt_f16_f32_e32 v7, v8
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_mov_b32_e32 v8, 0x3fa00000
+; SI-NEXT:    v_mov_b32_e32 v9, 0x3f200000
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT:    v_cvt_f32_f16_e32 v10, v3
+; SI-NEXT:    v_cvt_f32_f16_e32 v11, v5
+; SI-NEXT:    v_cvt_f32_f16_e32 v12, v2
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v1
+; SI-NEXT:    v_cndmask_b32_e32 v1, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v7
+; SI-NEXT:    v_cndmask_b32_e32 v2, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v4
+; SI-NEXT:    v_cndmask_b32_e32 v3, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v6
+; SI-NEXT:    v_cndmask_b32_e32 v4, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v10
+; SI-NEXT:    v_cndmask_b32_e32 v5, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v11
+; SI-NEXT:    v_cndmask_b32_e32 v6, v8, v9, vcc
+; SI-NEXT:    v_cmp_nge_f32_e32 vcc, 0.5, v12
+; SI-NEXT:    v_cndmask_b32_e32 v7, v8, v9, vcc
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: vec_16xf16_extract_8xf16_0:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_ubyte v4, off, s[0:3], s32
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GFX9-NEXT:    s_and_b64 s[34:35], vcc, exec
+; GFX9-NEXT:    s_cbranch_scc0 .LBB8_2
+; GFX9-NEXT:  ; %bb.1: ; %F
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[2:3], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; kill: killed $vgpr2 killed $vgpr3
+; GFX9-NEXT:    s_cbranch_execz .LBB8_3
+; GFX9-NEXT:    s_branch .LBB8_4
+; GFX9-NEXT:  .LBB8_2:
+; GFX9-NEXT:    ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
+; GFX9-NEXT:  .LBB8_3: ; %T
+; GFX9-NEXT:    global_load_dwordx4 v[2:5], v[0:1], off offset:16 glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v[0:1], off glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    ; kill: killed $vgpr0 killed $vgpr1
+; GFX9-NEXT:  .LBB8_4: ; %exit
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3800
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0x3900
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0x3d00
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_ge_f16_e32 vcc, 0.5, v7
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_nle_f16_sdwa vcc, v7, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v7, v2, v1, vcc
+; GFX9-NEXT:    v_cmp_ge_f16_e32 vcc, 0.5, v6
+; GFX9-NEXT:    v_cndmask_b32_e32 v8, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_le_f16_sdwa vcc, v6, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v6, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_ge_f16_e32 vcc, 0.5, v5
+; GFX9-NEXT:    v_cndmask_b32_e32 v9, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_le_f16_sdwa vcc, v5, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v5, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_ge_f16_e32 vcc, 0.5, v4
+; GFX9-NEXT:    v_cndmask_b32_e32 v10, v1, v2, vcc
+; GFX9-NEXT:    v_cmp_le_f16_sdwa vcc, v4, v0 src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT:    v_pack_b32_f16 v0, v10, v0
+; GFX9-NEXT:    v_pack_b32_f16 v1, v9, v5
+; GFX9-NEXT:    v_pack_b32_f16 v2, v8, v6
+; GFX9-NEXT:    v_pack_b32_f16 v3, v3, v7
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: vec_16xf16_extract_8xf16_0:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    scratch_load_u8 v4, off, s32
+; GFX11-NEXT:    s_mov_b32 s0, 0
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v4
+; GFX11-NEXT:    s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT:    s_cbranch_scc0 .LBB8_2
+; GFX11-NEXT:  ; %bb.1: ; %F
+; GFX11-NEXT:    global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    global_load_b128 v[2:5], v[2:3], off glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT:    s_cbranch_vccz .LBB8_3
+; GFX11-NEXT:    s_branch .LBB8_4
+; GFX11-NEXT:  .LBB8_2:
+; GFX11-NEXT:    ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+; GFX11-NEXT:  .LBB8_3: ; %T
+; GFX11-NEXT:    global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    global_load_b128 v[2:5], v[0:1], off glc dlc
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:  .LBB8_4: ; %exit
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v5
+; GFX11-NEXT:    v_mov_b32_e32 v9, 0x3900
+; GFX11-NEXT:    v_mov_b32_e32 v1, 0x3d00
+; GFX11-NEXT:    v_lshrrev_b32_e32 v7, 16, v4
+; GFX11-NEXT:    v_lshrrev_b32_e32 v8, 16, v5
+; GFX11-NEXT:    v_lshrrev_b32_e32 v0, 16, v2
+; GFX11-NEXT:    v_lshrrev_b32_e32 v6, 16, v3
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v4
+; GFX11-NEXT:    v_cndmask_b32_e32 v4, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v7
+; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v3
+; GFX11-NEXT:    v_cndmask_b32_e32 v3, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v2
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v0
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_ge_f16_e32 vcc_lo, 0.5, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_pack_b32_f16 v0, v2, v0
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0x3900, v1, vcc_lo
+; GFX11-NEXT:    v_cmp_nge_f16_e32 vcc_lo, 0.5, v8
+; GFX11-NEXT:    v_pack_b32_f16 v2, v4, v7
+; GFX11-NEXT:    v_pack_b32_f16 v1, v3, v1
+; GFX11-NEXT:    v_cndmask_b32_e32 v6, 0x3d00, v9, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_pack_b32_f16 v3, v5, v6
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+  br i1 %cond, label %T, label %F
+
+T:
+  %t = load volatile <16 x half>, ptr addrspace(1) %p0
+  br label %exit
+
+F:
+  %f = load volatile <16 x half>, ptr addrspace(1) %p1
+  br label %exit
+
+exit:
+  %m = phi <16 x half> [ %t, %T ], [ %f, %F ]
+  %v2 = shufflevector <16 x half> %m, <16 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %b2 = fcmp ugt <8 x half> %v2, <half 0xH3800, half 0xH3800, half 0xH3800, half 0xH3800, half 0xH3800, half 0xH3800, half 0xH3800, half 0xH3800>
+  %r2 = select <8 x i1> %b2, <8 x half> <half 0xH3900, half 0xH3900, half 0xH3900, half 0xH3900, half 0xH3900, half 0xH3900, half 0xH3900, half 0xH3900>, <8 x half> <half 0xH3D00, half 0xH3D00, half 0xH3D00, half 0xH3D00, half 0xH3D00, half 0xH3D00, half 0xH3D00, half 0xH3D00>
+  ret <8 x half> %r2
+}
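
For anyone re-checking this backport locally, the modified test can be
run on its own with llvm-lit; assuming a CMake build directory named
build at the top of the llvm-project checkout, something like:

    build/bin/llvm-lit -v llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll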