[llvm] 589b9df - [AMDGPU] Fix scalar_to_vector for v8i16/v8f16

via llvm-commits <llvm-commits at lists.llvm.org>
Mon May 2 19:31:08 PDT 2022


Author: hsmahesha
Date: 2022-05-03T07:28:15+05:30
New Revision: 589b9df4e15131348b8d94406e0667d6e1b7518b

URL: https://github.com/llvm/llvm-project/commit/589b9df4e15131348b8d94406e0667d6e1b7518b
DIFF: https://github.com/llvm/llvm-project/commit/589b9df4e15131348b8d94406e0667d6e1b7518b.diff

LOG: [AMDGPU] Fix scalar_to_vector for v8i16/v8f16

Custom-lower it to a BUILD_VECTOR (the scalar in element 0, remaining
elements undef) instead of expanding it through a stack temporary, so that
the scratch store/load is avoided.

Reviewed By: rampitec

Differential Revision: https://reviews.llvm.org/D124734
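
For context, SCALAR_TO_VECTOR nodes for these types typically arise from an
insertelement into an undef vector at lane 0. A minimal IR reduction in that
spirit (hypothetical; the committed coverage lives in the added
scalar_to_vector.v8i16.ll test) would previously expand through a scratch
store/load pair and now selects straight register packing:

  define <8 x i16> @s2v_v8i16(i16 %x) {
    ; Lane 0 takes %x, lanes 1-7 stay undef; this insert into undef at
    ; index 0 is typically combined into a scalar_to_vector node of v8i16.
    %v = insertelement <8 x i16> undef, i16 %x, i32 0
    ret <8 x i16> %v
  }

The custom hook rewrites such a node as a BUILD_VECTOR with the scalar in
element 0 and undef elsewhere, which the existing lowerBUILD_VECTOR then
selects without touching the stack.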

Added: 
    llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 521d17faa6fff..a8310c2a0c275 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -737,7 +737,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::BUILD_VECTOR, Vec16, Custom);
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec16, Custom);
       setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand);
-      setOperationAction(ISD::SCALAR_TO_VECTOR, Vec16, Expand);
+      setOperationAction(ISD::SCALAR_TO_VECTOR, Vec16, Custom);
     }
   }
 
@@ -4772,6 +4772,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
   case ISD::VECTOR_SHUFFLE:
     return lowerVECTOR_SHUFFLE(Op, DAG);
+  case ISD::SCALAR_TO_VECTOR:
+    return lowerSCALAR_TO_VECTOR(Op, DAG);
   case ISD::BUILD_VECTOR:
     return lowerBUILD_VECTOR(Op, DAG);
   case ISD::FP_ROUND:
@@ -5957,6 +5959,22 @@ SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
   return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces);
 }
 
+SDValue SITargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDValue SVal = Op.getOperand(0);
+  EVT ResultVT = Op.getValueType();
+  EVT SValVT = SVal.getValueType();
+  SDValue UndefVal = DAG.getUNDEF(SValVT);
+  SDLoc SL(Op);
+
+  SmallVector<SDValue, 8> VElts;
+  VElts.push_back(SVal);
+  for (int I = 1, E = ResultVT.getVectorNumElements(); I < E; ++I)
+    VElts.push_back(UndefVal);
+
+  return DAG.getBuildVector(ResultVT, SL, VElts);
+}
+
 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
   SDLoc SL(Op);

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 6105fe6b0a1fa..18bb9fb0bb7e3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -151,6 +151,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
index a4eb3a409c47f..129a2c2af8cb6 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -1748,46 +1748,35 @@ define amdgpu_kernel void @v_insertelement_v4f16_dynamic_sgpr(<4 x half> addrspa
 define amdgpu_kernel void @v_insertelement_v8f16_3(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in, i32 %val) {
 ; GFX9-LABEL: v_insertelement_v8f16_3:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x10
-; GFX9-NEXT:    s_add_u32 s0, s0, s7
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v4, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    buffer_store_short v5, off, s[0:3], 0 offset:16
-; GFX9-NEXT:    buffer_load_dword v5, off, s[0:3], 0 offset:16
-; GFX9-NEXT:    s_waitcnt vmcnt(2)
-; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v4, s[2:3]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_lshl_or_b32 v1, v5, 16, v1
-; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[8:9]
+; GFX9-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT:    v_lshl_or_b32 v1, s6, 16, v1
+; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_insertelement_v8f16_3:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
 ; VI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
-; VI-NEXT:    s_add_u32 s0, s0, s7
-; VI-NEXT:    s_addc_u32 s1, s1, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v4
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s4
 ; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; VI-NEXT:    buffer_store_short v5, off, s[0:3], 0 offset:16
-; VI-NEXT:    buffer_load_dword v6, off, s[0:3], 0 offset:16
-; VI-NEXT:    v_mov_b32_e32 v5, s9
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s8, v4
-; VI-NEXT:    s_mov_b32 s4, 0xffff
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    s_lshl_b32 s1, s4, 16
+; VI-NEXT:    s_mov_b32 s2, 0xffff
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s0, v4
+; VI-NEXT:    v_mov_b32_e32 v6, s1
 ; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    s_waitcnt vmcnt(2)
-; VI-NEXT:    v_bfi_b32 v3, s4, v3, v3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; VI-NEXT:    v_bfi_b32 v3, s2, v3, v3
 ; VI-NEXT:    v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
@@ -1826,45 +1815,35 @@ define amdgpu_kernel void @v_insertelement_v8f16_3(<8 x half> addrspace(1)* %out
 define amdgpu_kernel void @v_insertelement_v8i16_6(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %in, i32 %val) {
 ; GFX9-LABEL: v_insertelement_v8i16_6:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x10
-; GFX9-NEXT:    s_add_u32 s0, s0, s7
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
+; GFX9-NEXT:    v_mov_b32_e32 v5, 0xffff
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v4, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v5, s6
-; GFX9-NEXT:    buffer_store_short v5, off, s[0:3], 0 offset:16
-; GFX9-NEXT:    buffer_load_dword v5, off, s[0:3], 0 offset:16
-; GFX9-NEXT:    v_mov_b32_e32 v6, 0xffff
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v4, s[2:3]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_bfi_b32 v3, v6, v5, v3
-; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[8:9]
+; GFX9-NEXT:    v_bfi_b32 v3, v5, s6, v3
+; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_insertelement_v8i16_6:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x0
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; VI-NEXT:    s_load_dword s4, s[4:5], 0x10
 ; VI-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
-; VI-NEXT:    s_add_u32 s0, s0, s7
-; VI-NEXT:    s_addc_u32 s1, s1, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v4
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s4
 ; VI-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
-; VI-NEXT:    buffer_store_short v5, off, s[0:3], 0 offset:16
-; VI-NEXT:    buffer_load_dword v6, off, s[0:3], 0 offset:16
-; VI-NEXT:    s_mov_b32 s4, 0xffff
-; VI-NEXT:    v_mov_b32_e32 v5, s9
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s8, v4
+; VI-NEXT:    s_mov_b32 s2, 0xffff
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_mov_b32_e32 v6, s4
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s0, v4
 ; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    s_waitcnt vmcnt(2)
-; VI-NEXT:    v_bfi_b32 v1, s4, v1, v1
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_bfi_b32 v3, s4, v6, v3
+; VI-NEXT:    v_bfi_b32 v3, s2, v6, v3
+; VI-NEXT:    v_bfi_b32 v1, s2, v1, v1
 ; VI-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll
new file mode 100644
index 0000000000000..d4ca078505a33
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.v8i16.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX900 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < %s | FileCheck -check-prefixes=GFX906 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 < %s | FileCheck -check-prefixes=GFX908 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90A %s
+
+define amdgpu_kernel void @scalar_to_vector_v8i16(<2 x i32> %in, <8 x i16>* %out) #0 {
+; GFX900-LABEL: scalar_to_vector_v8i16:
+; GFX900:       ; %bb.0: ; %entry
+; GFX900-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_pack_lh_b32_b16 s4, s0, s0
+; GFX900-NEXT:    v_mov_b32_e32 v6, s3
+; GFX900-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, s1
+; GFX900-NEXT:    v_mov_b32_e32 v4, s0
+; GFX900-NEXT:    v_mov_b32_e32 v1, s4
+; GFX900-NEXT:    v_mov_b32_e32 v3, s4
+; GFX900-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX900-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX906-LABEL: scalar_to_vector_v8i16:
+; GFX906:       ; %bb.0: ; %entry
+; GFX906-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX906-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX906-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX906-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX906-NEXT:    s_pack_lh_b32_b16 s4, s0, s0
+; GFX906-NEXT:    v_mov_b32_e32 v6, s3
+; GFX906-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX906-NEXT:    v_mov_b32_e32 v2, s1
+; GFX906-NEXT:    v_mov_b32_e32 v4, s0
+; GFX906-NEXT:    v_mov_b32_e32 v1, s4
+; GFX906-NEXT:    v_mov_b32_e32 v3, s4
+; GFX906-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX906-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX906-NEXT:    s_endpgm
+;
+; GFX908-LABEL: scalar_to_vector_v8i16:
+; GFX908:       ; %bb.0: ; %entry
+; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX908-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX908-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX908-NEXT:    s_pack_lh_b32_b16 s4, s0, s0
+; GFX908-NEXT:    v_mov_b32_e32 v6, s3
+; GFX908-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX908-NEXT:    v_mov_b32_e32 v2, s1
+; GFX908-NEXT:    v_mov_b32_e32 v4, s0
+; GFX908-NEXT:    v_mov_b32_e32 v1, s4
+; GFX908-NEXT:    v_mov_b32_e32 v3, s4
+; GFX908-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX908-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX908-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: scalar_to_vector_v8i16:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90A-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX90A-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_pack_lh_b32_b16 s4, s0, s0
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s3
+; GFX90A-NEXT:    v_add_co_u32_e32 v0, vcc, s2, v0
+; GFX90A-NEXT:    v_mov_b32_e32 v3, s1
+; GFX90A-NEXT:    v_mov_b32_e32 v5, s0
+; GFX90A-NEXT:    v_mov_b32_e32 v2, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v4, s4
+; GFX90A-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX90A-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
+; GFX90A-NEXT:    s_endpgm
+entry:
+  %val.1.i32 = extractelement <2 x i32> %in, i64 0
+  %val.2.vec2.i16 = bitcast i32 %val.1.i32 to <2 x i16>
+  %val.3.vec8.i16 = shufflevector <2 x i16> %val.2.vec2.i16, <2 x i16> %val.2.vec2.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+
+  %val.4.vec4.i32 = shufflevector <2 x i32> %in, <2 x i32> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %val.5.vec8.i16 = bitcast <4 x i32> %val.4.vec4.i32 to <8 x i16>
+
+  %val.6.vec8.i16 = shufflevector <8 x i16> %val.5.vec8.i16, <8 x i16> %val.3.vec8.i16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %out.gep = getelementptr inbounds <8 x i16>, <8 x i16>* %out, i64 %tid.ext
+  store <8 x i16> %val.6.vec8.i16, <8 x i16>* %out.gep, align 16
+
+  ret void
+}
+
+define amdgpu_kernel void @scalar_to_vector_v8f16(<2 x float> %in, <8 x half>* %out) #0 {
+; GFX900-LABEL: scalar_to_vector_v8f16:
+; GFX900:       ; %bb.0: ; %entry
+; GFX900-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX900-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX900-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX900-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX900-NEXT:    v_mov_b32_e32 v3, s0
+; GFX900-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX900-NEXT:    v_mov_b32_e32 v6, s3
+; GFX900-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, s1
+; GFX900-NEXT:    v_mov_b32_e32 v1, s0
+; GFX900-NEXT:    v_mov_b32_e32 v4, s0
+; GFX900-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX900-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX900-NEXT:    s_endpgm
+;
+; GFX906-LABEL: scalar_to_vector_v8f16:
+; GFX906:       ; %bb.0: ; %entry
+; GFX906-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX906-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX906-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX906-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX906-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX906-NEXT:    v_mov_b32_e32 v3, s0
+; GFX906-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX906-NEXT:    v_mov_b32_e32 v6, s3
+; GFX906-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX906-NEXT:    v_mov_b32_e32 v2, s1
+; GFX906-NEXT:    v_mov_b32_e32 v1, s0
+; GFX906-NEXT:    v_mov_b32_e32 v4, s0
+; GFX906-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX906-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX906-NEXT:    s_endpgm
+;
+; GFX908-LABEL: scalar_to_vector_v8f16:
+; GFX908:       ; %bb.0: ; %entry
+; GFX908-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX908-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX908-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX908-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX908-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX908-NEXT:    v_mov_b32_e32 v3, s0
+; GFX908-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX908-NEXT:    v_mov_b32_e32 v6, s3
+; GFX908-NEXT:    v_add_co_u32_e32 v5, vcc, s2, v0
+; GFX908-NEXT:    v_mov_b32_e32 v2, s1
+; GFX908-NEXT:    v_mov_b32_e32 v1, s0
+; GFX908-NEXT:    v_mov_b32_e32 v4, s0
+; GFX908-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX908-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX908-NEXT:    s_endpgm
+;
+; GFX90A-LABEL: scalar_to_vector_v8f16:
+; GFX90A:       ; %bb.0: ; %entry
+; GFX90A-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90A-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x8
+; GFX90A-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX90A-NEXT:    v_mov_b32_e32 v4, s0
+; GFX90A-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX90A-NEXT:    v_mov_b32_e32 v1, s3
+; GFX90A-NEXT:    v_add_co_u32_e32 v0, vcc, s2, v0
+; GFX90A-NEXT:    v_mov_b32_e32 v3, s1
+; GFX90A-NEXT:    v_mov_b32_e32 v2, s0
+; GFX90A-NEXT:    v_mov_b32_e32 v5, s0
+; GFX90A-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX90A-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
+; GFX90A-NEXT:    s_endpgm
+entry:
+  %val.1.float = extractelement <2 x float> %in, i64 0
+  %val.2.vec2.half = bitcast float %val.1.float to <2 x half>
+  %val.3.vec8.half = shufflevector <2 x half> %val.2.vec2.half, <2 x half> %val.2.vec2.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+
+  %val.4.vec4.float = shufflevector <2 x float> %in, <2 x float> %in, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %val.5.vec8.half = bitcast <4 x float> %val.4.vec4.float to <8 x half>
+
+  %val.6.vec8.half = shufflevector <8 x half> %val.5.vec8.half, <8 x half> %val.3.vec8.half, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9>
+
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %out.gep = getelementptr inbounds <8 x half>, <8 x half>* %out, i64 %tid.ext
+  store <8 x half> %val.6.vec8.half, <8 x half>* %out.gep, align 16
+
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
