[llvm] 9681901 - AMDGPU/GlobalISel: Fix RegBankSelect for v2s16 shifts

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 11 17:55:46 PDT 2020


Author: Matt Arsenault
Date: 2020-04-11T20:55:33-04:00
New Revision: 96819011caa8e590b6455be485a0f5711c7513e7

URL: https://github.com/llvm/llvm-project/commit/96819011caa8e590b6455be485a0f5711c7513e7
DIFF: https://github.com/llvm/llvm-project/commit/96819011caa8e590b6455be485a0f5711c7513e7.diff

LOG: AMDGPU/GlobalISel: Fix RegBankSelect for v2s16 shifts

The SALU has no 16-bit shifts, so these need to be scalarized and promoted to 32-bit operations.
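
For context, the net effect of the new mapping on an SGPR-mapped G_SHL of
<2 x s16> looks roughly like this (a hand-written sketch of the generic MIR,
not output copied from the commit's regbankselect tests; register names are
invented). G_LSHR and G_ASHR differ only in using G_ZEXT or G_SEXT instead of
G_ANYEXT when widening the shifted value:

  ; Before: no SALU instruction performs a packed 16-bit shift.
  %res:sgpr(<2 x s16>) = G_SHL %val:sgpr(<2 x s16>), %amt:sgpr(<2 x s16>)

  ; After: scalarized into two s16 shifts, each widened to 32 bits.
  %v0:sgpr(s16), %v1:sgpr(s16) = G_UNMERGE_VALUES %val(<2 x s16>)
  %a0:sgpr(s16), %a1:sgpr(s16) = G_UNMERGE_VALUES %amt(<2 x s16>)
  %v0w:sgpr(s32) = G_ANYEXT %v0(s16)   ; shifted value: anyext is enough for shl
  %a0w:sgpr(s32) = G_ZEXT %a0(s16)     ; shift amount: zero-extended
  %r0w:sgpr(s32) = G_SHL %v0w, %a0w(s32)
  %r0:sgpr(s16) = G_TRUNC %r0w(s32)
  ; ... likewise for the high half (%v1, %a1) ...
  %res:sgpr(<2 x s16>) = G_BUILD_VECTOR %r0(s16), %r1(s16)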

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/shl.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ashr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-lshr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-shl.mir

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index adfcecd2329b..db88b4ffb580 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2074,11 +2074,16 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
   }
   case AMDGPU::G_ADD:
   case AMDGPU::G_SUB:
-  case AMDGPU::G_MUL: {
+  case AMDGPU::G_MUL:
+  case AMDGPU::G_SHL:
+  case AMDGPU::G_LSHR:
+  case AMDGPU::G_ASHR: {
     Register DstReg = MI.getOperand(0).getReg();
     LLT DstTy = MRI.getType(DstReg);
-    const LLT S32 = LLT::scalar(32);
-    if (DstTy == S32)
+
+    // 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
+    // Packed 16-bit operations need to be scalarized and promoted.
+    if (DstTy != LLT::scalar(16) && DstTy != LLT::vector(2, 16))
       break;
 
     const RegisterBank *DstBank =
@@ -2086,9 +2091,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     if (DstBank == &AMDGPU::VGPRRegBank)
       break;
 
-    // 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
-    // Packed 16-bit operations need to be scalarized and promoted.
-
+    const LLT S32 = LLT::scalar(32);
     MachineFunction *MF = MI.getParent()->getParent();
     MachineIRBuilder B(MI);
     ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank);
@@ -2113,6 +2116,13 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
 
       if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized)
         llvm_unreachable("widen scalar should have succeeded");
+
+      // FIXME: s16 shift amounts should be legal.
+      if (Opc == AMDGPU::G_SHL || Opc == AMDGPU::G_LSHR ||
+          Opc == AMDGPU::G_ASHR) {
+        if (Helper.widenScalar(MI, 1, S32) != LegalizerHelper::Legalized)
+          llvm_unreachable("widen scalar should have succeeded");
+      }
     }
 
     return;

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
new file mode 100644
index 000000000000..71ee562f0ecc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
@@ -0,0 +1,1224 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+define i8 @v_ashr_i8(i8 %value, i8 %amount) {
+; GFX6-LABEL: v_ashr_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i16_sdwa v0, v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i8 %value, %amount
+  ret i8 %result
+}
+
+define i8 @v_ashr_i8_7(i8 %value) {
+; GFX6-LABEL: v_ashr_i8_7:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 8
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 7, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i8_7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, 7
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i8_7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_mov_b32 s4, 7
+; GFX9-NEXT:    v_ashrrev_i16_sdwa v0, s4, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i8 %value, 7
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_ashr_i8(i8 inreg %value, i8 inreg %amount) {
+; GFX6-LABEL: s_ashr_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX6-NEXT:    s_sext_i32_i8 s0, s0
+; GFX6-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ashr_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sext_i32_i8 s0, s0
+; GFX8-NEXT:    s_sext_i32_i8 s1, s1
+; GFX8-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ashr_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sext_i32_i8 s0, s0
+; GFX9-NEXT:    s_sext_i32_i8 s1, s1
+; GFX9-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i8 %value, %amount
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_ashr_i8_7(i8 inreg %value) {
+; GCN-LABEL: s_ashr_i8_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_sext_i32_i8 s0, s0
+; GCN-NEXT:    s_ashr_i32 s0, s0, 7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i8 %value, 7
+  ret i8 %result
+}
+
+
+define i24 @v_ashr_i24(i24 %value, i24 %amount) {
+; GCN-LABEL: v_ashr_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i24 %value, %amount
+  ret i24 %result
+}
+
+define i24 @v_ashr_i24_7(i24 %value) {
+; GCN-LABEL: v_ashr_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 7, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i24 %value, 7
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_ashr_i24(i24 inreg %value, i24 inreg %amount) {
+; GCN-LABEL: s_ashr_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_and_b32 s1, s1, 0xffffff
+; GCN-NEXT:    s_bfe_i32 s0, s0, 0x180000
+; GCN-NEXT:    s_ashr_i32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i24 %value, %amount
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_ashr_i24_7(i24 inreg %value) {
+; GCN-LABEL: s_ashr_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_bfe_i32 s0, s0, 0x180000
+; GCN-NEXT:    s_ashr_i32 s0, s0, 7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i24 %value, 7
+  ret i24 %result
+}
+
+define i32 @v_ashr_i32(i32 %value, i32 %amount) {
+; GCN-LABEL: v_ashr_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i32 %value, %amount
+  ret i32 %result
+}
+
+define i32 @v_ashr_i32_31(i32 %value) {
+; GCN-LABEL: v_ashr_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 31, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_ashr_i32(i32 inreg %value, i32 inreg %amount) {
+; GCN-LABEL: s_ashr_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i32 %value, %amount
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_ashr_i32_31(i32 inreg %value) {
+; GCN-LABEL: s_ashr_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps float @ashr_i32_sv(i32 inreg %value, i32 %amount) {
+; GFX6-LABEL: ashr_i32_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_ashr_i32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i32_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i32_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @ashr_i32_vs(i32 %value, i32 inreg %amount) {
+; GCN-LABEL: ashr_i32_vs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, s0, v0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define <2 x i32> @v_ashr_v2i32(<2 x i32> %value, <2 x i32> %amount) {
+; GCN-LABEL: v_ashr_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v2, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, v3, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <2 x i32> @v_ashr_v2i32_31(<2 x i32> %value) {
+; GCN-LABEL: v_ashr_v2i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 31, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i32> %value, <i32 31, i32 31>
+  ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_ashr_v2i32(<2 x i32> inreg %value, <2 x i32> inreg %amount) {
+; GCN-LABEL: s_ashr_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s2
+; GCN-NEXT:    s_ashr_i32 s1, s1, s3
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <3 x i32> @v_ashr_v3i32(<3 x i32> %value, <3 x i32> %amount) {
+; GCN-LABEL: v_ashr_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v3, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, v4, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v2, v5, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_ashr_v3i32(<3 x i32> inreg %value, <3 x i32> inreg %amount) {
+; GCN-LABEL: s_ashr_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s3
+; GCN-NEXT:    s_ashr_i32 s1, s1, s4
+; GCN-NEXT:    s_ashr_i32 s2, s2, s5
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define <4 x i32> @v_ashr_v4i32(<4 x i32> %value, <4 x i32> %amount) {
+; GCN-LABEL: v_ashr_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v4, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, v5, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v2, v6, v2
+; GCN-NEXT:    v_ashrrev_i32_e32 v3, v7, v3
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define amdgpu_ps <4 x i32> @s_ashr_v4i32(<4 x i32> inreg %value, <4 x i32> inreg %amount) {
+; GCN-LABEL: s_ashr_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s4
+; GCN-NEXT:    s_ashr_i32 s1, s1, s5
+; GCN-NEXT:    s_ashr_i32 s2, s2, s6
+; GCN-NEXT:    s_ashr_i32 s3, s3, s7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define <5 x i32> @v_ashr_v5i32(<5 x i32> %value, <5 x i32> %amount) {
+; GCN-LABEL: v_ashr_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v5, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, v6, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v2, v7, v2
+; GCN-NEXT:    v_ashrrev_i32_e32 v3, v8, v3
+; GCN-NEXT:    v_ashrrev_i32_e32 v4, v9, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_ashr_v5i32(<5 x i32> inreg %value, <5 x i32> inreg %amount) {
+; GCN-LABEL: s_ashr_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s5
+; GCN-NEXT:    s_ashr_i32 s1, s1, s6
+; GCN-NEXT:    s_ashr_i32 s2, s2, s7
+; GCN-NEXT:    s_ashr_i32 s3, s3, s8
+; GCN-NEXT:    s_ashr_i32 s4, s4, s9
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define <16 x i32> @v_ashr_v16i32(<16 x i32> %value, <16 x i32> %amount) {
+; GCN-LABEL: v_ashr_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, v16, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, v17, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v2, v18, v2
+; GCN-NEXT:    v_ashrrev_i32_e32 v3, v19, v3
+; GCN-NEXT:    v_ashrrev_i32_e32 v4, v20, v4
+; GCN-NEXT:    v_ashrrev_i32_e32 v5, v21, v5
+; GCN-NEXT:    v_ashrrev_i32_e32 v6, v22, v6
+; GCN-NEXT:    v_ashrrev_i32_e32 v7, v23, v7
+; GCN-NEXT:    v_ashrrev_i32_e32 v8, v24, v8
+; GCN-NEXT:    v_ashrrev_i32_e32 v9, v25, v9
+; GCN-NEXT:    v_ashrrev_i32_e32 v10, v26, v10
+; GCN-NEXT:    v_ashrrev_i32_e32 v11, v27, v11
+; GCN-NEXT:    v_ashrrev_i32_e32 v12, v28, v12
+; GCN-NEXT:    v_ashrrev_i32_e32 v13, v29, v13
+; GCN-NEXT:    v_ashrrev_i32_e32 v14, v30, v14
+; GCN-NEXT:    v_ashrrev_i32_e32 v15, v31, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define amdgpu_ps <16 x i32> @s_ashr_v16i32(<16 x i32> inreg %value, <16 x i32> inreg %amount) {
+; GCN-LABEL: s_ashr_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s0, s16
+; GCN-NEXT:    s_ashr_i32 s1, s1, s17
+; GCN-NEXT:    s_ashr_i32 s2, s2, s18
+; GCN-NEXT:    s_ashr_i32 s3, s3, s19
+; GCN-NEXT:    s_ashr_i32 s4, s4, s20
+; GCN-NEXT:    s_ashr_i32 s5, s5, s21
+; GCN-NEXT:    s_ashr_i32 s6, s6, s22
+; GCN-NEXT:    s_ashr_i32 s7, s7, s23
+; GCN-NEXT:    s_ashr_i32 s8, s8, s24
+; GCN-NEXT:    s_ashr_i32 s9, s9, s25
+; GCN-NEXT:    s_ashr_i32 s10, s10, s26
+; GCN-NEXT:    s_ashr_i32 s11, s11, s27
+; GCN-NEXT:    s_ashr_i32 s12, s12, s28
+; GCN-NEXT:    s_ashr_i32 s13, s13, s29
+; GCN-NEXT:    s_ashr_i32 s14, s14, s30
+; GCN-NEXT:    s_ashr_i32 s15, s15, s31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define i16 @v_ashr_i16(i16 %value, i16 %amount) {
+; GFX6-LABEL: v_ashr_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i16 %value, %amount
+  ret i16 %result
+}
+
+define i16 @v_ashr_i16_31(i16 %value) {
+; GCN-LABEL: v_ashr_i16_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i16 %value, 31
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_ashr_i16(i16 inreg %value, i16 inreg %amount) {
+; GFX6-LABEL: s_ashr_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xffff
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ashr_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ashr_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_sext_i32_i16 s0, s0
+; GFX9-NEXT:    s_sext_i32_i16 s1, s1
+; GFX9-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i16 %value, %amount
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_ashr_i16_15(i16 inreg %value) {
+; GCN-LABEL: s_ashr_i16_15:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_sext_i32_i16 s0, s0
+; GCN-NEXT:    s_ashr_i32 s0, s0, 15
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i16 %value, 15
+  ret i16 %result
+}
+
+define amdgpu_ps half @ashr_i16_sv(i16 inreg %value, i16 %amount) {
+; GFX6-LABEL: ashr_i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    v_ashr_i32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i16_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i16_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define amdgpu_ps half @ashr_i16_vs(i16 %value, i16 inreg %amount) {
+; GFX6-LABEL: ashr_i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i16_e32 v0, s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i16_e32 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define <2 x i16> @v_ashr_v2i16(<2 x i16> %value, <2 x i16> %amount) {
+; GFX6-LABEL: v_ashr_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, v2, v0
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v1, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i16_e32 v2, v1, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i16> %value, %amount
+  ret <2 x i16> %result
+}
+
+define <2 x i16> @v_ashr_v2i16_15(<2 x i16> %value) {
+; GFX6-LABEL: v_ashr_v2i16_15:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 15, v0
+; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 15, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v2i16_15:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, 15
+; GFX8-NEXT:    v_ashrrev_i16_e32 v1, 15, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v2i16_15:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_pack_ll_b32_b16 s4, 15, 15
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, s4, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i16> %value, <i16 15, i16 15>
+  ret <2 x i16> %result
+}
+
+define amdgpu_ps i32 @s_ashr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: s_ashr_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    s_and_b32 s2, s2, s4
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    s_ashr_i32 s0, s0, s2
+; GFX6-NEXT:    s_and_b32 s2, s3, s4
+; GFX6-NEXT:    s_sext_i32_i16 s1, s1
+; GFX6-NEXT:    s_ashr_i32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s4
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ashr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    s_sext_i32_i16 s3, s3
+; GFX8-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX8-NEXT:    s_ashr_i32 s1, s2, s3
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_or_b32 s0, s1, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ashr_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX9-NEXT:    s_ashr_i32 s0, s0, s1
+; GFX9-NEXT:    s_ashr_i32 s1, s2, s3
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to i32
+  ret i32 %cast
+}
+
+define amdgpu_ps float @ashr_v2i16_sv(<2 x i16> inreg %value, <2 x i16> %amount) {
+; GFX6-LABEL: ashr_v2i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    v_ashr_i32_e32 v0, s0, v0
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    s_sext_i32_i16 s0, s1
+; GFX6-NEXT:    v_ashr_i32_e32 v1, s0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_v2i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_ashrrev_i16_e64 v1, v0, s0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_v2i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @ashr_v2i16_vs(<2 x i16> %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: ashr_v2i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, s0, v0
+; GFX6-NEXT:    s_and_b32 s0, s1, s2
+; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v1, s0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_v2i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_ashrrev_i16_e32 v1, s0, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_v2i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+; FIXME
+; define <3 x i16> @v_ashr_v3i16(<3 x i16> %value, <3 x i16> %amount) {
+;   %result = ashr <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_ashr_v3i16(<3 x i16> inreg %value, <3 x i16> inreg %amount) {
+;   %result = ashr <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+define <2 x float> @v_ashr_v4i16(<4 x i16> %value, <4 x i16> %amount) {
+; GFX6-LABEL: v_ashr_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v4
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, v4, v0
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v5
+; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v1, v4, v1
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v6
+; GFX6-NEXT:    v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_ashrrev_i32_e32 v2, v4, v2
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v3, v4, v3
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i16_e32 v4, v2, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_ashrrev_i16_e32 v2, v3, v1
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, v2, v0
+; GFX9-NEXT:    v_pk_ashrrev_i16 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_ashr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg %amount) {
+; GFX6-LABEL: s_ashr_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s8, 0xffff
+; GFX6-NEXT:    s_and_b32 s4, s4, s8
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    s_ashr_i32 s0, s0, s4
+; GFX6-NEXT:    s_and_b32 s4, s5, s8
+; GFX6-NEXT:    s_sext_i32_i16 s1, s1
+; GFX6-NEXT:    s_ashr_i32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s4, s6, s8
+; GFX6-NEXT:    s_sext_i32_i16 s2, s2
+; GFX6-NEXT:    s_and_b32 s1, s1, s8
+; GFX6-NEXT:    s_ashr_i32 s2, s2, s4
+; GFX6-NEXT:    s_and_b32 s4, s7, s8
+; GFX6-NEXT:    s_sext_i32_i16 s3, s3
+; GFX6-NEXT:    s_ashr_i32 s3, s3, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s8
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s8
+; GFX6-NEXT:    s_and_b32 s2, s3, s8
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ashr_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX8-NEXT:    s_lshr_b32 s6, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s7, s3, 16
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    s_sext_i32_i16 s4, s4
+; GFX8-NEXT:    s_sext_i32_i16 s6, s6
+; GFX8-NEXT:    s_ashr_i32 s0, s0, s2
+; GFX8-NEXT:    s_ashr_i32 s2, s4, s6
+; GFX8-NEXT:    s_mov_b32 s4, 0xffff
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s3, s3
+; GFX8-NEXT:    s_sext_i32_i16 s5, s5
+; GFX8-NEXT:    s_sext_i32_i16 s7, s7
+; GFX8-NEXT:    s_ashr_i32 s1, s1, s3
+; GFX8-NEXT:    s_ashr_i32 s3, s5, s7
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s4
+; GFX8-NEXT:    s_or_b32 s0, s2, s0
+; GFX8-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s4
+; GFX8-NEXT:    s_or_b32 s1, s2, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ashr_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s2, 16
+; GFX9-NEXT:    s_ashr_i32 s0, s0, s2
+; GFX9-NEXT:    s_ashr_i32 s2, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_ashr_i32 s1, s1, s3
+; GFX9-NEXT:    s_ashr_i32 s2, s2, s4
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x i32>
+  ret <2 x i32> %cast
+}
+
+; FIXME
+; define <5 x i16> @v_ashr_v5i16(<5 x i16> %value, <5 x i16> %amount) {
+;   %result = ashr <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_ashr_v5i16(<5 x i16> inreg %value, <5 x i16> inreg %amount) {
+;   %result = ashr <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define <3 x float> @v_ashr_v6i16(<6 x i16> %value, <6 x i16> %amount) {
+;   %result = ashr <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x float>
+;   ret <3 x float> %cast
+; }
+
+; define amdgpu_ps <3 x i32> @s_ashr_v6i16(<6 x i16> inreg %value, <6 x i16> inreg %amount) {
+;   %result = ashr <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x i32>
+;   ret <3 x i32> %cast
+; }
+
+define <4 x float> @v_ashr_v8i16(<8 x i16> %value, <8 x i16> %amount) {
+; GFX6-LABEL: v_ashr_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v8
+; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v0, v8, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v9
+; GFX6-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v1, v8, v1
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v10
+; GFX6-NEXT:    v_bfe_i32 v2, v2, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v2, v8, v2
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v11
+; GFX6-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GFX6-NEXT:    v_mov_b32_e32 v16, 0xffff
+; GFX6-NEXT:    v_ashrrev_i32_e32 v3, v8, v3
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v12
+; GFX6-NEXT:    v_bfe_i32 v4, v4, 0, 16
+; GFX6-NEXT:    v_and_b32_e32 v1, v1, v16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v4, v8, v4
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v13
+; GFX6-NEXT:    v_bfe_i32 v5, v5, 0, 16
+; GFX6-NEXT:    v_ashrrev_i32_e32 v5, v8, v5
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v14
+; GFX6-NEXT:    v_bfe_i32 v6, v6, 0, 16
+; GFX6-NEXT:    v_and_b32_e32 v0, v0, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_ashrrev_i32_e32 v6, v8, v6
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, v2, v16
+; GFX6-NEXT:    v_and_b32_e32 v2, v3, v16
+; GFX6-NEXT:    v_and_b32_e32 v8, v15, v16
+; GFX6-NEXT:    v_bfe_i32 v7, v7, 0, 16
+; GFX6-NEXT:    v_and_b32_e32 v3, v5, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_ashrrev_i32_e32 v7, v8, v7
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, v4, v16
+; GFX6-NEXT:    v_and_b32_e32 v4, v7, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, v6, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i16_e32 v8, v4, v0
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_ashrrev_i16_e32 v4, v5, v1
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v1, v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v1, v4, v1
+; GFX8-NEXT:    v_ashrrev_i16_e32 v4, v6, v2
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT:    v_ashrrev_i16_e32 v4, v7, v3
+; GFX8-NEXT:    v_ashrrev_i16_sdwa v3, v7, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v8, v0
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_ashrrev_i16 v0, v4, v0
+; GFX9-NEXT:    v_pk_ashrrev_i16 v1, v5, v1
+; GFX9-NEXT:    v_pk_ashrrev_i16 v2, v6, v2
+; GFX9-NEXT:    v_pk_ashrrev_i16 v3, v7, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x float>
+  ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_ashr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg %amount) {
+; GFX6-LABEL: s_ashr_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s16, 0xffff
+; GFX6-NEXT:    s_and_b32 s8, s8, s16
+; GFX6-NEXT:    s_sext_i32_i16 s0, s0
+; GFX6-NEXT:    s_ashr_i32 s0, s0, s8
+; GFX6-NEXT:    s_and_b32 s8, s9, s16
+; GFX6-NEXT:    s_sext_i32_i16 s1, s1
+; GFX6-NEXT:    s_ashr_i32 s1, s1, s8
+; GFX6-NEXT:    s_and_b32 s8, s10, s16
+; GFX6-NEXT:    s_sext_i32_i16 s2, s2
+; GFX6-NEXT:    s_ashr_i32 s2, s2, s8
+; GFX6-NEXT:    s_and_b32 s8, s11, s16
+; GFX6-NEXT:    s_sext_i32_i16 s3, s3
+; GFX6-NEXT:    s_ashr_i32 s3, s3, s8
+; GFX6-NEXT:    s_and_b32 s8, s12, s16
+; GFX6-NEXT:    s_sext_i32_i16 s4, s4
+; GFX6-NEXT:    s_and_b32 s1, s1, s16
+; GFX6-NEXT:    s_ashr_i32 s4, s4, s8
+; GFX6-NEXT:    s_and_b32 s8, s13, s16
+; GFX6-NEXT:    s_sext_i32_i16 s5, s5
+; GFX6-NEXT:    s_ashr_i32 s5, s5, s8
+; GFX6-NEXT:    s_and_b32 s8, s14, s16
+; GFX6-NEXT:    s_sext_i32_i16 s6, s6
+; GFX6-NEXT:    s_and_b32 s0, s0, s16
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_ashr_i32 s6, s6, s8
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s16
+; GFX6-NEXT:    s_and_b32 s2, s3, s16
+; GFX6-NEXT:    s_and_b32 s8, s15, s16
+; GFX6-NEXT:    s_sext_i32_i16 s7, s7
+; GFX6-NEXT:    s_and_b32 s3, s5, s16
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_ashr_i32 s7, s7, s8
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s2, s4, s16
+; GFX6-NEXT:    s_and_b32 s4, s7, s16
+; GFX6-NEXT:    s_lshl_b32 s3, s3, 16
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_and_b32 s3, s6, s16
+; GFX6-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX6-NEXT:    s_or_b32 s3, s3, s4
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_ashr_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX8-NEXT:    s_lshr_b32 s12, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s9, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s13, s5, 16
+; GFX8-NEXT:    s_sext_i32_i16 s0, s0
+; GFX8-NEXT:    s_sext_i32_i16 s4, s4
+; GFX8-NEXT:    s_sext_i32_i16 s8, s8
+; GFX8-NEXT:    s_sext_i32_i16 s12, s12
+; GFX8-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s14, s6, 16
+; GFX8-NEXT:    s_ashr_i32 s0, s0, s4
+; GFX8-NEXT:    s_ashr_i32 s4, s8, s12
+; GFX8-NEXT:    s_mov_b32 s8, 0xffff
+; GFX8-NEXT:    s_sext_i32_i16 s1, s1
+; GFX8-NEXT:    s_sext_i32_i16 s5, s5
+; GFX8-NEXT:    s_sext_i32_i16 s9, s9
+; GFX8-NEXT:    s_sext_i32_i16 s13, s13
+; GFX8-NEXT:    s_lshr_b32 s11, s3, 16
+; GFX8-NEXT:    s_lshr_b32 s15, s7, 16
+; GFX8-NEXT:    s_ashr_i32 s1, s1, s5
+; GFX8-NEXT:    s_sext_i32_i16 s2, s2
+; GFX8-NEXT:    s_sext_i32_i16 s6, s6
+; GFX8-NEXT:    s_sext_i32_i16 s10, s10
+; GFX8-NEXT:    s_sext_i32_i16 s14, s14
+; GFX8-NEXT:    s_ashr_i32 s5, s9, s13
+; GFX8-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s8
+; GFX8-NEXT:    s_ashr_i32 s2, s2, s6
+; GFX8-NEXT:    s_or_b32 s0, s4, s0
+; GFX8-NEXT:    s_sext_i32_i16 s3, s3
+; GFX8-NEXT:    s_sext_i32_i16 s7, s7
+; GFX8-NEXT:    s_sext_i32_i16 s11, s11
+; GFX8-NEXT:    s_sext_i32_i16 s15, s15
+; GFX8-NEXT:    s_ashr_i32 s6, s10, s14
+; GFX8-NEXT:    s_lshl_b32 s4, s5, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s8
+; GFX8-NEXT:    s_ashr_i32 s3, s3, s7
+; GFX8-NEXT:    s_or_b32 s1, s4, s1
+; GFX8-NEXT:    s_ashr_i32 s7, s11, s15
+; GFX8-NEXT:    s_lshl_b32 s4, s6, 16
+; GFX8-NEXT:    s_and_b32 s2, s2, s8
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
+; GFX8-NEXT:    s_lshl_b32 s4, s7, 16
+; GFX8-NEXT:    s_and_b32 s3, s3, s8
+; GFX8-NEXT:    s_or_b32 s3, s4, s3
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_ashr_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s9, s4, 16
+; GFX9-NEXT:    s_ashr_i32 s0, s0, s4
+; GFX9-NEXT:    s_ashr_i32 s4, s8, s9
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s8, s5, 16
+; GFX9-NEXT:    s_ashr_i32 s1, s1, s5
+; GFX9-NEXT:    s_ashr_i32 s4, s4, s8
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s6, 16
+; GFX9-NEXT:    s_ashr_i32 s4, s4, s5
+; GFX9-NEXT:    s_ashr_i32 s2, s2, s6
+; GFX9-NEXT:    s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s7, 16
+; GFX9-NEXT:    s_ashr_i32 s3, s3, s7
+; GFX9-NEXT:    s_ashr_i32 s4, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x i32>
+  ret <4 x i32> %cast
+}
+
+define i64 @v_ashr_i64(i64 %value, i64 %amount) {
+; GFX6-LABEL: v_ashr_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_ashr_i64 v[0:1], v[0:1], v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], v2, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i64 %value, %amount
+  ret i64 %result
+}
+
+define i64 @v_ashr_i64_63(i64 %value) {
+; GCN-LABEL: v_ashr_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 31, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i64 %value, 63
+  ret i64 %result
+}
+
+define i64 @v_ashr_i64_33(i64 %value) {
+; GCN-LABEL: v_ashr_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v0, 1, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i64 %value, 33
+  ret i64 %result
+}
+
+define i64 @v_ashr_i64_32(i64 %value) {
+; GCN-LABEL: v_ashr_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i64 %value, 32
+  ret i64 %result
+}
+
+define i64 @v_ashr_i64_31(i64 %value) {
+; GFX6-LABEL: v_ashr_i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_ashr_i64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_ashr_i64(i64 inreg %value, i64 inreg %amount) {
+; GCN-LABEL: s_ashr_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i64 s[0:1], s[0:1], s2
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, %amount
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_ashr_i64_63(i64 inreg %value) {
+; GCN-LABEL: s_ashr_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s0, s1, 31
+; GCN-NEXT:    s_mov_b32 s1, s0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, 63
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_ashr_i64_33(i64 inreg %value) {
+; GCN-LABEL: s_ashr_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i32 s2, s1, 31
+; GCN-NEXT:    s_ashr_i32 s0, s1, 1
+; GCN-NEXT:    s_mov_b32 s1, s2
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, 33
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_ashr_i64_32(i64 inreg %value) {
+; GCN-LABEL: s_ashr_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s0, s1
+; GCN-NEXT:    s_ashr_i32 s1, s1, 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, 32
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_ashr_i64_31(i64 inreg %value) {
+; GCN-LABEL: s_ashr_i64_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i64 s[0:1], s[0:1], 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @ashr_i64_sv(i64 inreg %value, i64 %amount) {
+; GFX6-LABEL: ashr_i64_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_ashr_i64 v[0:1], s[0:1], v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_i64_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], v0, s[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_i64_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], v0, s[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @ashr_i64_vs(i64 %value, i64 inreg %amount) {
+; GFX6-LABEL: ashr_i64_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_ashr_i64 v[0:1], v[0:1], s0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: ashr_i64_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], s0, v[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: ashr_i64_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], s0, v[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = ashr i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define <2 x i64> @v_ashr_v2i64(<2 x i64> %value, <2 x i64> %amount) {
+; GFX6-LABEL: v_ashr_v2i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_ashr_i64 v[0:1], v[0:1], v4
+; GFX6-NEXT:    v_ashr_i64 v[2:3], v[2:3], v6
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v2i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], v4, v[0:1]
+; GFX8-NEXT:    v_ashrrev_i64 v[2:3], v6, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v2i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], v4, v[0:1]
+; GFX9-NEXT:    v_ashrrev_i64 v[2:3], v6, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}
+
+define <2 x i64> @v_ashr_v2i64_31(<2 x i64> %value) {
+; GFX6-LABEL: v_ashr_v2i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_ashr_i64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    v_ashr_i64 v[2:3], v[2:3], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_v2i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    v_ashrrev_i64 v[2:3], 31, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_v2i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_ashrrev_i64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    v_ashrrev_i64 v[2:3], 31, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = ashr <2 x i64> %value, <i64 31, i64 31>
+  ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_ashr_v2i64(<2 x i64> inreg %value, <2 x i64> inreg %amount) {
+; GCN-LABEL: s_ashr_v2i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_ashr_i64 s[0:1], s[0:1], s4
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], s6
+; GCN-NEXT:    ; return to shader part epilog
+  %result = ashr <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
new file mode 100644
index 000000000000..9d82396bbc36
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -0,0 +1,1234 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+define i8 @v_lshr_i8(i8 %value, i8 %amount) {
+; GFX6-LABEL: v_lshr_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_movk_i32 s4, 0xff
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b16_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i8 %value, %amount
+  ret i8 %result
+}
+
+define i8 @v_lshr_i8_7(i8 %value) {
+; GFX6-LABEL: v_lshr_i8_7:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 7, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_i8_7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, 7
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_i8_7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_mov_b32 s4, 7
+; GFX9-NEXT:    v_lshrrev_b16_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i8 %value, 7
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_lshr_i8(i8 inreg %value, i8 inreg %amount) {
+; GFX6-LABEL: s_lshr_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_movk_i32 s2, 0xff
+; GFX6-NEXT:    s_and_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_lshr_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_movk_i32 s2, 0xff
+; GFX8-NEXT:    s_and_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_lshr_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_movk_i32 s2, 0xff
+; GFX9-NEXT:    s_and_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s1, s1, s2
+; GFX9-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i8 %value, %amount
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_lshr_i8_7(i8 inreg %value) {
+; GCN-LABEL: s_lshr_i8_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_and_b32 s0, s0, 0xff
+; GCN-NEXT:    s_lshr_b32 s0, s0, 7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i8 %value, 7
+  ret i8 %result
+}
+
+
+define i24 @v_lshr_i24(i24 %value, i24 %amount) {
+; GCN-LABEL: v_lshr_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, 0xffffff
+; GCN-NEXT:    v_and_b32_e32 v1, s4, v1
+; GCN-NEXT:    v_and_b32_e32 v0, s4, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i24 %value, %amount
+  ret i24 %result
+}
+
+define i24 @v_lshr_i24_7(i24 %value) {
+; GCN-LABEL: v_lshr_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 7, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i24 %value, 7
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_lshr_i24(i24 inreg %value, i24 inreg %amount) {
+; GCN-LABEL: s_lshr_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s2, 0xffffff
+; GCN-NEXT:    s_and_b32 s1, s1, s2
+; GCN-NEXT:    s_and_b32 s0, s0, s2
+; GCN-NEXT:    s_lshr_b32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i24 %value, %amount
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_lshr_i24_7(i24 inreg %value) {
+; GCN-LABEL: s_lshr_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffffff
+; GCN-NEXT:    s_lshr_b32 s0, s0, 7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i24 %value, 7
+  ret i24 %result
+}
+
+define i32 @v_lshr_i32(i32 %value, i32 %amount) {
+; GCN-LABEL: v_lshr_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i32 %value, %amount
+  ret i32 %result
+}
+
+define i32 @v_lshr_i32_31(i32 %value) {
+; GCN-LABEL: v_lshr_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 31, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_lshr_i32(i32 inreg %value, i32 inreg %amount) {
+; GCN-LABEL: s_lshr_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i32 %value, %amount
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_lshr_i32_31(i32 inreg %value) {
+; GCN-LABEL: s_lshr_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps float @lshr_i32_sv(i32 inreg %value, i32 %amount) {
+; GFX6-LABEL: lshr_i32_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshr_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b32_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshrrev_b32_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @lshr_i32_vs(i32 %value, i32 inreg %amount) {
+; GCN-LABEL: lshr_i32_vs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, s0, v0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define <2 x i32> @v_lshr_v2i32(<2 x i32> %value, <2 x i32> %amount) {
+; GCN-LABEL: v_lshr_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, v3, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <2 x i32> @v_lshr_v2i32_31(<2 x i32> %value) {
+; GCN-LABEL: v_lshr_v2i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 31, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, 31, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i32> %value, <i32 31, i32 31>
+  ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_lshr_v2i32(<2 x i32> inreg %value, <2 x i32> inreg %amount) {
+; GCN-LABEL: s_lshr_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s2
+; GCN-NEXT:    s_lshr_b32 s1, s1, s3
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <3 x i32> @v_lshr_v3i32(<3 x i32> %value, <3 x i32> %amount) {
+; GCN-LABEL: v_lshr_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v3, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, v4, v1
+; GCN-NEXT:    v_lshrrev_b32_e32 v2, v5, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_lshr_v3i32(<3 x i32> inreg %value, <3 x i32> inreg %amount) {
+; GCN-LABEL: s_lshr_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s3
+; GCN-NEXT:    s_lshr_b32 s1, s1, s4
+; GCN-NEXT:    s_lshr_b32 s2, s2, s5
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define <4 x i32> @v_lshr_v4i32(<4 x i32> %value, <4 x i32> %amount) {
+; GCN-LABEL: v_lshr_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v4, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, v5, v1
+; GCN-NEXT:    v_lshrrev_b32_e32 v2, v6, v2
+; GCN-NEXT:    v_lshrrev_b32_e32 v3, v7, v3
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define amdgpu_ps <4 x i32> @s_lshr_v4i32(<4 x i32> inreg %value, <4 x i32> inreg %amount) {
+; GCN-LABEL: s_lshr_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s4
+; GCN-NEXT:    s_lshr_b32 s1, s1, s5
+; GCN-NEXT:    s_lshr_b32 s2, s2, s6
+; GCN-NEXT:    s_lshr_b32 s3, s3, s7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define <5 x i32> @v_lshr_v5i32(<5 x i32> %value, <5 x i32> %amount) {
+; GCN-LABEL: v_lshr_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v5, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, v6, v1
+; GCN-NEXT:    v_lshrrev_b32_e32 v2, v7, v2
+; GCN-NEXT:    v_lshrrev_b32_e32 v3, v8, v3
+; GCN-NEXT:    v_lshrrev_b32_e32 v4, v9, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_lshr_v5i32(<5 x i32> inreg %value, <5 x i32> inreg %amount) {
+; GCN-LABEL: s_lshr_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s5
+; GCN-NEXT:    s_lshr_b32 s1, s1, s6
+; GCN-NEXT:    s_lshr_b32 s2, s2, s7
+; GCN-NEXT:    s_lshr_b32 s3, s3, s8
+; GCN-NEXT:    s_lshr_b32 s4, s4, s9
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define <16 x i32> @v_lshr_v16i32(<16 x i32> %value, <16 x i32> %amount) {
+; GCN-LABEL: v_lshr_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, v16, v0
+; GCN-NEXT:    v_lshrrev_b32_e32 v1, v17, v1
+; GCN-NEXT:    v_lshrrev_b32_e32 v2, v18, v2
+; GCN-NEXT:    v_lshrrev_b32_e32 v3, v19, v3
+; GCN-NEXT:    v_lshrrev_b32_e32 v4, v20, v4
+; GCN-NEXT:    v_lshrrev_b32_e32 v5, v21, v5
+; GCN-NEXT:    v_lshrrev_b32_e32 v6, v22, v6
+; GCN-NEXT:    v_lshrrev_b32_e32 v7, v23, v7
+; GCN-NEXT:    v_lshrrev_b32_e32 v8, v24, v8
+; GCN-NEXT:    v_lshrrev_b32_e32 v9, v25, v9
+; GCN-NEXT:    v_lshrrev_b32_e32 v10, v26, v10
+; GCN-NEXT:    v_lshrrev_b32_e32 v11, v27, v11
+; GCN-NEXT:    v_lshrrev_b32_e32 v12, v28, v12
+; GCN-NEXT:    v_lshrrev_b32_e32 v13, v29, v13
+; GCN-NEXT:    v_lshrrev_b32_e32 v14, v30, v14
+; GCN-NEXT:    v_lshrrev_b32_e32 v15, v31, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define amdgpu_ps <16 x i32> @s_lshr_v16i32(<16 x i32> inreg %value, <16 x i32> inreg %amount) {
+; GCN-LABEL: s_lshr_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s0, s16
+; GCN-NEXT:    s_lshr_b32 s1, s1, s17
+; GCN-NEXT:    s_lshr_b32 s2, s2, s18
+; GCN-NEXT:    s_lshr_b32 s3, s3, s19
+; GCN-NEXT:    s_lshr_b32 s4, s4, s20
+; GCN-NEXT:    s_lshr_b32 s5, s5, s21
+; GCN-NEXT:    s_lshr_b32 s6, s6, s22
+; GCN-NEXT:    s_lshr_b32 s7, s7, s23
+; GCN-NEXT:    s_lshr_b32 s8, s8, s24
+; GCN-NEXT:    s_lshr_b32 s9, s9, s25
+; GCN-NEXT:    s_lshr_b32 s10, s10, s26
+; GCN-NEXT:    s_lshr_b32 s11, s11, s27
+; GCN-NEXT:    s_lshr_b32 s12, s12, s28
+; GCN-NEXT:    s_lshr_b32 s13, s13, s29
+; GCN-NEXT:    s_lshr_b32 s14, s14, s30
+; GCN-NEXT:    s_lshr_b32 s15, s15, s31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define i16 @v_lshr_i16(i16 %value, i16 %amount) {
+; GFX6-LABEL: v_lshr_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i16 %value, %amount
+  ret i16 %result
+}
+
+define i16 @v_lshr_i16_31(i16 %value) {
+; GCN-LABEL: v_lshr_i16_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i16 %value, 31
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_lshr_i16(i16 inreg %value, i16 inreg %amount) {
+; GFX6-LABEL: s_lshr_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    s_and_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_lshr_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s2, 0xffff
+; GFX8-NEXT:    s_and_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_lshr_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, 0xffff
+; GFX9-NEXT:    s_and_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s1, s1, s2
+; GFX9-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i16 %value, %amount
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_lshr_i16_15(i16 inreg %value) {
+; GCN-LABEL: s_lshr_i16_15:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_and_b32 s0, s0, 0xffff
+; GCN-NEXT:    s_lshr_b32 s0, s0, 15
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i16 %value, 15
+  ret i16 %result
+}
+
+define amdgpu_ps half @lshr_i16_sv(i16 inreg %value, i16 %amount) {
+; GFX6-LABEL: lshr_i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s1, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX6-NEXT:    s_and_b32 s0, s0, s1
+; GFX6-NEXT:    v_lshr_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b16_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshrrev_b16_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define amdgpu_ps half @lshr_i16_vs(i16 %value, i16 inreg %amount) {
+; GFX6-LABEL: lshr_i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s1, 0xffff
+; GFX6-NEXT:    s_and_b32 s0, s0, s1
+; GFX6-NEXT:    v_and_b32_e32 v0, s1, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshrrev_b16_e32 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define <2 x i16> @v_lshr_v2i16(<2 x i16> %value, <2 x i16> %amount) {
+; GFX6-LABEL: v_lshr_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, v2, v0
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v1, v0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i16> %value, %amount
+  ret <2 x i16> %result
+}
+
+define <2 x i16> @v_lshr_v2i16_15(<2 x i16> %value) {
+; GFX6-LABEL: v_lshr_v2i16_15:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 15, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 15, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v2i16_15:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, 15
+; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 15, v0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v2i16_15:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_pack_ll_b32_b16 s4, 15, 15
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, s4, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i16> %value, <i16 15, i16 15>
+  ret <2 x i16> %result
+}
+
+define amdgpu_ps i32 @s_lshr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: s_lshr_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    s_and_b32 s2, s2, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s4
+; GFX6-NEXT:    s_lshr_b32 s0, s0, s2
+; GFX6-NEXT:    s_and_b32 s2, s3, s4
+; GFX6-NEXT:    s_and_b32 s1, s1, s4
+; GFX6-NEXT:    s_lshr_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s4
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_lshr_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_mov_b32 s3, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s3
+; GFX8-NEXT:    s_and_b32 s1, s1, s3
+; GFX8-NEXT:    s_and_b32 s2, s2, s3
+; GFX8-NEXT:    s_and_b32 s4, s4, s3
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX8-NEXT:    s_lshr_b32 s1, s2, s4
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s3
+; GFX8-NEXT:    s_or_b32 s0, s1, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_lshr_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s0, s0, s1
+; GFX9-NEXT:    s_lshr_b32 s1, s2, s3
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to i32
+  ret i32 %cast
+}
+
+define amdgpu_ps float @lshr_v2i16_sv(<2 x i16> inreg %value, <2 x i16> %amount) {
+; GFX6-LABEL: lshr_v2i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    v_lshr_b32_e32 v0, s0, v0
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    s_and_b32 s0, s1, s2
+; GFX6-NEXT:    v_lshr_b32_e32 v1, s0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_v2i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_lshrrev_b16_e64 v1, v0, s0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_v2i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @lshr_v2i16_vs(<2 x i16> %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: lshr_v2i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, s0, v0
+; GFX6-NEXT:    s_and_b32 s0, s1, s2
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, s0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_v2i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v1, s0, v0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_v2i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+; FIXME: Enable these once <3 x i16> shifts are handled.
+; define <3 x i16> @v_lshr_v3i16(<3 x i16> %value, <3 x i16> %amount) {
+;   %result = lshr <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_lshr_v3i16(<3 x i16> inreg %value, <3 x i16> inreg %amount) {
+;   %result = lshr <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+define <2 x float> @v_lshr_v4i16(<4 x i16> %value, <4 x i16> %amount) {
+; GFX6-LABEL: v_lshr_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v4
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, v4, v0
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v5
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, v4, v1
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v6
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v2, v4, v2
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT:    v_and_b32_e32 v3, s4, v3
+; GFX6-NEXT:    v_lshrrev_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v2, v0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v2, v3, v1
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, v2, v0
+; GFX9-NEXT:    v_pk_lshrrev_b16 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_lshr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg %amount) {
+; GFX6-LABEL: s_lshr_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s8, 0xffff
+; GFX6-NEXT:    s_and_b32 s4, s4, s8
+; GFX6-NEXT:    s_and_b32 s0, s0, s8
+; GFX6-NEXT:    s_lshr_b32 s0, s0, s4
+; GFX6-NEXT:    s_and_b32 s4, s5, s8
+; GFX6-NEXT:    s_and_b32 s1, s1, s8
+; GFX6-NEXT:    s_lshr_b32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s4, s6, s8
+; GFX6-NEXT:    s_and_b32 s2, s2, s8
+; GFX6-NEXT:    s_and_b32 s1, s1, s8
+; GFX6-NEXT:    s_lshr_b32 s2, s2, s4
+; GFX6-NEXT:    s_and_b32 s4, s7, s8
+; GFX6-NEXT:    s_and_b32 s3, s3, s8
+; GFX6-NEXT:    s_lshr_b32 s3, s3, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s8
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s8
+; GFX6-NEXT:    s_and_b32 s2, s3, s8
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_lshr_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX8-NEXT:    s_mov_b32 s6, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s8, s3, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s6
+; GFX8-NEXT:    s_and_b32 s2, s2, s6
+; GFX8-NEXT:    s_and_b32 s4, s4, s6
+; GFX8-NEXT:    s_and_b32 s7, s7, s6
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s2
+; GFX8-NEXT:    s_lshr_b32 s2, s4, s7
+; GFX8-NEXT:    s_and_b32 s1, s1, s6
+; GFX8-NEXT:    s_and_b32 s3, s3, s6
+; GFX8-NEXT:    s_and_b32 s5, s5, s6
+; GFX8-NEXT:    s_and_b32 s8, s8, s6
+; GFX8-NEXT:    s_lshr_b32 s1, s1, s3
+; GFX8-NEXT:    s_lshr_b32 s3, s5, s8
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s6
+; GFX8-NEXT:    s_or_b32 s0, s2, s0
+; GFX8-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s6
+; GFX8-NEXT:    s_or_b32 s1, s2, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_lshr_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s2, 16
+; GFX9-NEXT:    s_lshr_b32 s0, s0, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_lshr_b32 s1, s1, s3
+; GFX9-NEXT:    s_lshr_b32 s2, s2, s4
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x i32>
+  ret <2 x i32> %cast
+}
+
+; FIXME: Enable these once <5 x i16> and <6 x i16> shifts are handled.
+; define <5 x i16> @v_lshr_v5i16(<5 x i16> %value, <5 x i16> %amount) {
+;   %result = lshr <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_lshr_v5i16(<5 x i16> inreg %value, <5 x i16> inreg %amount) {
+;   %result = lshr <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define <3 x float> @v_lshr_v6i16(<6 x i16> %value, <6 x i16> %amount) {
+;   %result = lshr <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x float>
+;   ret <3 x float> %cast
+; }
+
+; define amdgpu_ps <3 x i32> @s_lshr_v6i16(<6 x i16> inreg %value, <6 x i16> inreg %amount) {
+;   %result = lshr <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x i32>
+;   ret <3 x i32> %cast
+; }
+
+define <4 x float> @v_lshr_v8i16(<8 x i16> %value, <8 x i16> %amount) {
+; GFX6-LABEL: v_lshr_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v8
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, v8, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v9
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, v8, v1
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v10
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX6-NEXT:    v_lshrrev_b32_e32 v2, v8, v2
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v11
+; GFX6-NEXT:    v_and_b32_e32 v3, s4, v3
+; GFX6-NEXT:    v_mov_b32_e32 v16, 0xffff
+; GFX6-NEXT:    v_lshrrev_b32_e32 v3, v8, v3
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v12
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v4
+; GFX6-NEXT:    v_and_b32_e32 v1, v1, v16
+; GFX6-NEXT:    v_lshrrev_b32_e32 v4, v8, v4
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v13
+; GFX6-NEXT:    v_and_b32_e32 v5, s4, v5
+; GFX6-NEXT:    v_lshrrev_b32_e32 v5, v8, v5
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v14
+; GFX6-NEXT:    v_and_b32_e32 v6, s4, v6
+; GFX6-NEXT:    v_and_b32_e32 v0, v0, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v6, v8, v6
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, v2, v16
+; GFX6-NEXT:    v_and_b32_e32 v2, v3, v16
+; GFX6-NEXT:    v_and_b32_e32 v8, v15, v16
+; GFX6-NEXT:    v_and_b32_e32 v7, v7, v16
+; GFX6-NEXT:    v_and_b32_e32 v3, v5, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_lshrrev_b32_e32 v7, v8, v7
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, v4, v16
+; GFX6-NEXT:    v_and_b32_e32 v4, v7, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, v6, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b16_e32 v8, v4, v0
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v5, v1
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v1, v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v1, v4, v1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v6, v2
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v4, v7, v3
+; GFX8-NEXT:    v_lshrrev_b16_sdwa v3, v7, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v8, v0
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshrrev_b16 v0, v4, v0
+; GFX9-NEXT:    v_pk_lshrrev_b16 v1, v5, v1
+; GFX9-NEXT:    v_pk_lshrrev_b16 v2, v6, v2
+; GFX9-NEXT:    v_pk_lshrrev_b16 v3, v7, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x float>
+  ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg %amount) {
+; GFX6-LABEL: s_lshr_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s16, 0xffff
+; GFX6-NEXT:    s_and_b32 s8, s8, s16
+; GFX6-NEXT:    s_and_b32 s0, s0, s16
+; GFX6-NEXT:    s_lshr_b32 s0, s0, s8
+; GFX6-NEXT:    s_and_b32 s8, s9, s16
+; GFX6-NEXT:    s_and_b32 s1, s1, s16
+; GFX6-NEXT:    s_lshr_b32 s1, s1, s8
+; GFX6-NEXT:    s_and_b32 s8, s10, s16
+; GFX6-NEXT:    s_and_b32 s2, s2, s16
+; GFX6-NEXT:    s_lshr_b32 s2, s2, s8
+; GFX6-NEXT:    s_and_b32 s8, s11, s16
+; GFX6-NEXT:    s_and_b32 s3, s3, s16
+; GFX6-NEXT:    s_lshr_b32 s3, s3, s8
+; GFX6-NEXT:    s_and_b32 s8, s12, s16
+; GFX6-NEXT:    s_and_b32 s4, s4, s16
+; GFX6-NEXT:    s_and_b32 s1, s1, s16
+; GFX6-NEXT:    s_lshr_b32 s4, s4, s8
+; GFX6-NEXT:    s_and_b32 s8, s13, s16
+; GFX6-NEXT:    s_and_b32 s5, s5, s16
+; GFX6-NEXT:    s_lshr_b32 s5, s5, s8
+; GFX6-NEXT:    s_and_b32 s8, s14, s16
+; GFX6-NEXT:    s_and_b32 s6, s6, s16
+; GFX6-NEXT:    s_and_b32 s0, s0, s16
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_lshr_b32 s6, s6, s8
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s16
+; GFX6-NEXT:    s_and_b32 s2, s3, s16
+; GFX6-NEXT:    s_and_b32 s8, s15, s16
+; GFX6-NEXT:    s_and_b32 s7, s7, s16
+; GFX6-NEXT:    s_and_b32 s3, s5, s16
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_lshr_b32 s7, s7, s8
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s2, s4, s16
+; GFX6-NEXT:    s_and_b32 s4, s7, s16
+; GFX6-NEXT:    s_lshl_b32 s3, s3, 16
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_and_b32 s3, s6, s16
+; GFX6-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX6-NEXT:    s_or_b32 s3, s3, s4
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_lshr_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX8-NEXT:    s_mov_b32 s12, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s13, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s9, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s14, s5, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s12
+; GFX8-NEXT:    s_and_b32 s4, s4, s12
+; GFX8-NEXT:    s_and_b32 s8, s8, s12
+; GFX8-NEXT:    s_and_b32 s13, s13, s12
+; GFX8-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s15, s6, 16
+; GFX8-NEXT:    s_lshr_b32 s0, s0, s4
+; GFX8-NEXT:    s_lshr_b32 s4, s8, s13
+; GFX8-NEXT:    s_and_b32 s1, s1, s12
+; GFX8-NEXT:    s_and_b32 s5, s5, s12
+; GFX8-NEXT:    s_and_b32 s9, s9, s12
+; GFX8-NEXT:    s_and_b32 s14, s14, s12
+; GFX8-NEXT:    s_lshr_b32 s11, s3, 16
+; GFX8-NEXT:    s_lshr_b32 s16, s7, 16
+; GFX8-NEXT:    s_lshr_b32 s1, s1, s5
+; GFX8-NEXT:    s_and_b32 s2, s2, s12
+; GFX8-NEXT:    s_and_b32 s6, s6, s12
+; GFX8-NEXT:    s_and_b32 s10, s10, s12
+; GFX8-NEXT:    s_and_b32 s15, s15, s12
+; GFX8-NEXT:    s_lshr_b32 s5, s9, s14
+; GFX8-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s12
+; GFX8-NEXT:    s_lshr_b32 s2, s2, s6
+; GFX8-NEXT:    s_or_b32 s0, s4, s0
+; GFX8-NEXT:    s_and_b32 s3, s3, s12
+; GFX8-NEXT:    s_and_b32 s7, s7, s12
+; GFX8-NEXT:    s_and_b32 s11, s11, s12
+; GFX8-NEXT:    s_and_b32 s16, s16, s12
+; GFX8-NEXT:    s_lshr_b32 s6, s10, s15
+; GFX8-NEXT:    s_lshl_b32 s4, s5, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s12
+; GFX8-NEXT:    s_lshr_b32 s3, s3, s7
+; GFX8-NEXT:    s_or_b32 s1, s4, s1
+; GFX8-NEXT:    s_lshr_b32 s7, s11, s16
+; GFX8-NEXT:    s_lshl_b32 s4, s6, 16
+; GFX8-NEXT:    s_and_b32 s2, s2, s12
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
+; GFX8-NEXT:    s_lshl_b32 s4, s7, 16
+; GFX8-NEXT:    s_and_b32 s3, s3, s12
+; GFX8-NEXT:    s_or_b32 s3, s4, s3
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_lshr_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s9, s4, 16
+; GFX9-NEXT:    s_lshr_b32 s0, s0, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s8, s9
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s8, s5, 16
+; GFX9-NEXT:    s_lshr_b32 s1, s1, s5
+; GFX9-NEXT:    s_lshr_b32 s4, s4, s8
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s6, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s4, s5
+; GFX9-NEXT:    s_lshr_b32 s2, s2, s6
+; GFX9-NEXT:    s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s7, 16
+; GFX9-NEXT:    s_lshr_b32 s3, s3, s7
+; GFX9-NEXT:    s_lshr_b32 s4, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x i32>
+  ret <4 x i32> %cast
+}
+
+define i64 @v_lshr_i64(i64 %value, i64 %amount) {
+; GFX6-LABEL: v_lshr_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshr_b64 v[0:1], v[0:1], v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], v2, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i64 %value, %amount
+  ret i64 %result
+}
+
+define i64 @v_lshr_i64_63(i64 %value) {
+; GCN-LABEL: v_lshr_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 31, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i64 %value, 63
+  ret i64 %result
+}
+
+define i64 @v_lshr_i64_33(i64 %value) {
+; GCN-LABEL: v_lshr_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshrrev_b32_e32 v0, 1, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i64 %value, 33
+  ret i64 %result
+}
+
+define i64 @v_lshr_i64_32(i64 %value) {
+; GCN-LABEL: v_lshr_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i64 %value, 32
+  ret i64 %result
+}
+
+define i64 @v_lshr_i64_31(i64 %value) {
+; GFX6-LABEL: v_lshr_i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshr_b64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_lshr_i64(i64 inreg %value, i64 inreg %amount) {
+; GCN-LABEL: s_lshr_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b64 s[0:1], s[0:1], s2
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, %amount
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_lshr_i64_63(i64 inreg %value) {
+; GCN-LABEL: s_lshr_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s1, 31
+; GCN-NEXT:    s_mov_b32 s1, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, 63
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_lshr_i64_33(i64 inreg %value) {
+; GCN-LABEL: s_lshr_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b32 s0, s1, 1
+; GCN-NEXT:    s_mov_b32 s1, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, 33
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_lshr_i64_32(i64 inreg %value) {
+; GCN-LABEL: s_lshr_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s0, s1
+; GCN-NEXT:    s_mov_b32 s1, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, 32
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_lshr_i64_31(i64 inreg %value) {
+; GCN-LABEL: s_lshr_i64_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b64 s[0:1], s[0:1], 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @lshr_i64_sv(i64 inreg %value, i64 %amount) {
+; GFX6-LABEL: lshr_i64_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshr_b64 v[0:1], s[0:1], v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_i64_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], v0, s[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_i64_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], v0, s[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @lshr_i64_vs(i64 %value, i64 inreg %amount) {
+; GFX6-LABEL: lshr_i64_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshr_b64 v[0:1], v[0:1], s0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: lshr_i64_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], s0, v[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_i64_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], s0, v[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define <2 x i64> @v_lshr_v2i64(<2 x i64> %value, <2 x i64> %amount) {
+; GFX6-LABEL: v_lshr_v2i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshr_b64 v[0:1], v[0:1], v4
+; GFX6-NEXT:    v_lshr_b64 v[2:3], v[2:3], v6
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v2i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], v4, v[0:1]
+; GFX8-NEXT:    v_lshrrev_b64 v[2:3], v6, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v2i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], v4, v[0:1]
+; GFX9-NEXT:    v_lshrrev_b64 v[2:3], v6, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}
+
+define <2 x i64> @v_lshr_v2i64_31(<2 x i64> %value) {
+; GFX6-LABEL: v_lshr_v2i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshr_b64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    v_lshr_b64 v[2:3], v[2:3], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_lshr_v2i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshrrev_b64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    v_lshrrev_b64 v[2:3], 31, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_lshr_v2i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    v_lshrrev_b64 v[2:3], 31, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = lshr <2 x i64> %value, <i64 31, i64 31>
+  ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_lshr_v2i64(<2 x i64> inreg %value, <2 x i64> inreg %amount) {
+; GCN-LABEL: s_lshr_v2i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshr_b64 s[0:1], s[0:1], s4
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], s6
+; GCN-NEXT:    ; return to shader part epilog
+  %result = lshr <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ashr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ashr.mir
index c75ad93ad11b..bdfc6a20c940 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ashr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ashr.mir
@@ -1,69 +1,258 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-fast -o - %s | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-greedy -o - %s | FileCheck %s
 
 ---
-name: ashr_ss
+name: ashr_s32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: ashr_ss
+    ; CHECK-LABEL: name: ashr_s32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: ashr_sv
+name: ashr_s32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: ashr_sv
+    ; CHECK-LABEL: name: ashr_s32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s32) = G_ASHR [[COPY2]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s32) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: ashr_vs
+name: ashr_s32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: ashr_vs
+    ; CHECK-LABEL: name: ashr_s32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
     ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s32) = G_ASHR [[COPY]], [[COPY2]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $sgpr0
     %2:_(s32) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: ashr_vv
+name: ashr_s32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: ashr_vv
+    ; CHECK-LABEL: name: ashr_s32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s32) = G_ASHR [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: ashr_s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: ashr_s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[SEXT:%[0-9]+]]:sgpr(s32) = G_SEXT [[TRUNC]](s16)
+    ; CHECK: [[ZEXT:%[0-9]+]]:sgpr(s32) = G_ZEXT [[TRUNC1]](s16)
+    ; CHECK: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
+    ; CHECK: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ASHR]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC2]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_ASHR %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: ashr_s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+
+    ; CHECK-LABEL: name: ashr_s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s16) = G_ASHR [[COPY2]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_ASHR %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: ashr_s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: ashr_s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s16) = G_ASHR [[TRUNC]], [[COPY2]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_ASHR %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: ashr_s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: ashr_s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s16) = G_ASHR [[TRUNC]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_ASHR %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: ashr_v2s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: ashr_v2s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1
+    ; CHECK: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+    ; CHECK: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[BITCAST]], [[BITCAST1]](s32)
+    ; CHECK: [[ASHR1:%[0-9]+]]:sgpr(s32) = G_ASHR [[LSHR]], [[LSHR1]](s32)
+    ; CHECK: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ASHR]](s32), [[ASHR1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $sgpr1
+    %2:_(<2 x s16>) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: ashr_v2s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: ashr_v2s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(<2 x s16>) = G_ASHR [[COPY2]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $vgpr0
+    %2:_(<2 x s16>) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: ashr_v2s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: ashr_v2s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(<2 x s16>) = G_ASHR [[COPY]], [[COPY2]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $sgpr0
+    %2:_(<2 x s16>) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: ashr_v2s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: ashr_v2s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(<2 x s16>) = G_ASHR [[COPY]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[ASHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<2 x s16>) = G_ASHR %0, %1
+    S_ENDPGM 0, implicit %2
+
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-lshr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-lshr.mir
index 666b6145b3df..e5a2f561772f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-lshr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-lshr.mir
@@ -1,66 +1,258 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-fast -o - %s | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-greedy -o - %s | FileCheck %s
 
 ---
-name: lshr_ss
+name: lshr_s32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: lshr_ss
+    ; CHECK-LABEL: name: lshr_s32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
+
 ---
-name: lshr_sv
+name: lshr_s32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: lshr_sv
+    ; CHECK-LABEL: name: lshr_s32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
     ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s32) = G_LSHR [[COPY2]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s32) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
+
 ---
-name: lshr_vs
+name: lshr_s32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: lshr_vs
+    ; CHECK-LABEL: name: lshr_s32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
     ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s32) = G_LSHR [[COPY]], [[COPY2]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $sgpr0
     %2:_(s32) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
 ...
+
 ---
-name: lshr_vv
+name: lshr_s32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: lshr_vv
+    ; CHECK-LABEL: name: lshr_s32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s32) = G_LSHR [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: lshr_s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: lshr_s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[ZEXT:%[0-9]+]]:sgpr(s32) = G_ZEXT [[TRUNC]](s16)
+    ; CHECK: [[ZEXT1:%[0-9]+]]:sgpr(s32) = G_ZEXT [[TRUNC1]](s16)
+    ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[ZEXT]], [[ZEXT1]](s32)
+    ; CHECK: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[LSHR]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC2]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_LSHR %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: lshr_s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+
+    ; CHECK-LABEL: name: lshr_s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s16) = G_LSHR [[COPY2]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_LSHR %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: lshr_s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: lshr_s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s16) = G_LSHR [[TRUNC]], [[COPY2]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_LSHR %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: lshr_s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: lshr_s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s16) = G_LSHR [[TRUNC]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_LSHR %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: lshr_v2s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: lshr_v2s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1
+    ; CHECK: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+    ; CHECK: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; CHECK: [[LSHR2:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[BITCAST1]](s32)
+    ; CHECK: [[LSHR3:%[0-9]+]]:sgpr(s32) = G_LSHR [[LSHR]], [[LSHR1]](s32)
+    ; CHECK: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[LSHR2]](s32), [[LSHR3]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $sgpr1
+    %2:_(<2 x s16>) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: lshr_v2s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: lshr_v2s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(<2 x s16>) = G_LSHR [[COPY2]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $vgpr0
+    %2:_(<2 x s16>) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: lshr_v2s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: lshr_v2s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(<2 x s16>) = G_LSHR [[COPY]], [[COPY2]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $sgpr0
+    %2:_(<2 x s16>) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: lshr_v2s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: lshr_v2s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(<2 x s16>) = G_LSHR [[COPY]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[LSHR]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<2 x s16>) = G_LSHR %0, %1
+    S_ENDPGM 0, implicit %2
+
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-shl.mir
index 7b1237bac8b5..67e1a9cd7213 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-shl.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-shl.mir
@@ -1,69 +1,258 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-fast -o - %s | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=regbankselect -regbankselect-greedy -o - %s | FileCheck %s
 
 ---
-name: shl_ss
+name: shl_s32_ss
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $sgpr1
-    ; CHECK-LABEL: name: shl_ss
+    ; CHECK-LABEL: name: shl_s32_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
     ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $sgpr1
     %2:_(s32) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: shl_sv
+name: shl_s32_sv
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: shl_sv
+    ; CHECK-LABEL: name: shl_s32_sv
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
     ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
     %0:_(s32) = COPY $sgpr0
     %1:_(s32) = COPY $vgpr0
     %2:_(s32) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: shl_vs
+name: shl_s32_vs
 legalized: true
 
 body: |
   bb.0:
     liveins: $sgpr0, $vgpr0
-    ; CHECK-LABEL: name: shl_vs
+    ; CHECK-LABEL: name: shl_s32_vs
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
     ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY2]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $sgpr0
     %2:_(s32) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
 ...
 
 ---
-name: shl_vv
+name: shl_s32_vv
 legalized: true
 
 body: |
   bb.0:
     liveins: $vgpr0, $vgpr1
-    ; CHECK-LABEL: name: shl_vv
+    ; CHECK-LABEL: name: shl_s32_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
     ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: shl_s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: shl_s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16)
+    ; CHECK: [[ZEXT:%[0-9]+]]:sgpr(s32) = G_ZEXT [[TRUNC1]](s16)
+    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[ANYEXT]], [[ZEXT]](s32)
+    ; CHECK: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SHL]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[TRUNC2]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SHL %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: shl_s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+
+    ; CHECK-LABEL: name: shl_s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[COPY2]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s16)
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SHL %2, %3
+    S_ENDPGM 0, implicit %4
+...
+
+---
+name: shl_s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: shl_s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[COPY2]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SHL %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: shl_s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: shl_s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+    ; CHECK: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](s16)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SHL %2, %3
+    S_ENDPGM 0, implicit %4
+
+...
+
+---
+name: shl_v2s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: shl_v2s16_ss
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1
+    ; CHECK: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+    ; CHECK: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[BITCAST]], [[BITCAST1]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:sgpr(s32) = G_SHL [[LSHR]], [[LSHR1]](s32)
+    ; CHECK: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[SHL]](s32), [[SHL1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $sgpr1
+    %2:_(<2 x s16>) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: shl_v2s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: shl_v2s16_sv
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(<2 x s16>) = G_SHL [[COPY2]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $sgpr0
+    %1:_(<2 x s16>) = COPY $vgpr0
+    %2:_(<2 x s16>) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
+...
+
+---
+name: shl_v2s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: shl_v2s16_vs
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY2]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $sgpr0
+    %2:_(<2 x s16>) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
+
+...
+
+---
+name: shl_v2s16_vv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: shl_v2s16_vv
+    ; CHECK: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[SHL:%[0-9]+]]:vgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<2 x s16>) = G_SHL %0, %1
+    S_ENDPGM 0, implicit %2
+
 ...
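
The new s16 SALU tests above also show why the two operands are widened differently: the shifted value goes through G_ANYEXT, since garbage in its high bits cannot reach the low 16 bits of the result, while the shift amount goes through G_ZEXT, since garbage there would change the shift count. The packed shl_v2s16_ss case is instead unpacked with a G_BITCAST and a 16-bit G_LSHR, shifted as two s32 values, and repacked with G_BUILD_VECTOR_TRUNC. As a standalone reproducer for that packed SALU path (an illustrative sketch, not part of the patch; the function name is made up):

define amdgpu_ps i32 @shl_v2s16_uniform_repro(<2 x i16> inreg %value, <2 x i16> inreg %amount) {
  %result = shl <2 x i16> %value, %amount   ; both operands uniform, so RegBankSelect picks the SGPR bank
  %cast = bitcast <2 x i16> %result to i32
  ret i32 %cast
}

Feeding this through llc with the options used by the RUN lines below (-global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900) should produce the scalarized s_lshl_b32 pair and an s_pack_ll_b32_b16, matching the s_shl_v2i16 checks in the new test.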

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/shl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/shl.ll
new file mode 100644
index 000000000000..ed1fe7af5f36
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/shl.ll
@@ -0,0 +1,1200 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+define i8 @v_shl_i8(i8 %value, i8 %amount) {
+; GFX6-LABEL: v_shl_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xff, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b16_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i8 %value, %amount
+  ret i8 %result
+}
+
+define i8 @v_shl_i8_7(i8 %value) {
+; GFX6-LABEL: v_shl_i8_7:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 7, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_i8_7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, 7, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_i8_7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b16_e32 v0, 7, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i8 %value, 7
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_shl_i8(i8 inreg %value, i8 inreg %amount) {
+; GFX6-LABEL: s_shl_i8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xff
+; GFX6-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_i8:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_movk_i32 s2, 0xff
+; GFX8-NEXT:    s_and_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_i8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_movk_i32 s2, 0xff
+; GFX9-NEXT:    s_and_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s1, s1, s2
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i8 %value, %amount
+  ret i8 %result
+}
+
+define amdgpu_ps i8 @s_shl_i8_7(i8 inreg %value) {
+; GFX6-LABEL: s_shl_i8_7:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_lshl_b32 s0, s0, 7
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_i8_7:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 7
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_i8_7:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_and_b32 s0, s0, 0xff
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 7
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i8 %value, 7
+  ret i8 %result
+}
+
+
+define i24 @v_shl_i24(i24 %value, i24 %amount) {
+; GCN-LABEL: v_shl_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i24 %value, %amount
+  ret i24 %result
+}
+
+define i24 @v_shl_i24_7(i24 %value) {
+; GCN-LABEL: v_shl_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 7, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i24 %value, 7
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_shl_i24(i24 inreg %value, i24 inreg %amount) {
+; GCN-LABEL: s_shl_i24:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_and_b32 s1, s1, 0xffffff
+; GCN-NEXT:    s_lshl_b32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i24 %value, %amount
+  ret i24 %result
+}
+
+define amdgpu_ps i24 @s_shl_i24_7(i24 inreg %value) {
+; GCN-LABEL: s_shl_i24_7:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, 7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i24 %value, 7
+  ret i24 %result
+}
+
+define i32 @v_shl_i32(i32 %value, i32 %amount) {
+; GCN-LABEL: v_shl_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v1, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i32 %value, %amount
+  ret i32 %result
+}
+
+define i32 @v_shl_i32_31(i32 %value) {
+; GCN-LABEL: v_shl_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 31, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_shl_i32(i32 inreg %value, i32 inreg %amount) {
+; GCN-LABEL: s_shl_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s1
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i32 %value, %amount
+  ret i32 %result
+}
+
+define amdgpu_ps i32 @s_shl_i32_31(i32 inreg %value) {
+; GCN-LABEL: s_shl_i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i32 %value, 31
+  ret i32 %result
+}
+
+define amdgpu_ps float @shl_i32_sv(i32 inreg %value, i32 %amount) {
+; GFX6-LABEL: shl_i32_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshl_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_i32_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b32_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_i32_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshlrev_b32_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @shl_i32_vs(i32 %value, i32 inreg %amount) {
+; GCN-LABEL: shl_i32_vs:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, s0, v0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i32 %value, %amount
+  %cast = bitcast i32 %result to float
+  ret float %cast
+}
+
+define <2 x i32> @v_shl_v2i32(<2 x i32> %value, <2 x i32> %amount) {
+; GCN-LABEL: v_shl_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, v3, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <2 x i32> @v_shl_v2i32_31(<2 x i32> %value) {
+; GCN-LABEL: v_shl_v2i32_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 31, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 31, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i32> %value, <i32 31, i32 31>
+  ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_shl_v2i32(<2 x i32> inreg %value, <2 x i32> inreg %amount) {
+; GCN-LABEL: s_shl_v2i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s2
+; GCN-NEXT:    s_lshl_b32 s1, s1, s3
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <2 x i32> %value, %amount
+  ret <2 x i32> %result
+}
+
+define <3 x i32> @v_shl_v3i32(<3 x i32> %value, <3 x i32> %amount) {
+; GCN-LABEL: v_shl_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v3, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, v4, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, v5, v2
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_shl_v3i32(<3 x i32> inreg %value, <3 x i32> inreg %amount) {
+; GCN-LABEL: s_shl_v3i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s3
+; GCN-NEXT:    s_lshl_b32 s1, s1, s4
+; GCN-NEXT:    s_lshl_b32 s2, s2, s5
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <3 x i32> %value, %amount
+  ret <3 x i32> %result
+}
+
+define <4 x i32> @v_shl_v4i32(<4 x i32> %value, <4 x i32> %amount) {
+; GCN-LABEL: v_shl_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v4, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, v5, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, v6, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, v7, v3
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define amdgpu_ps <4 x i32> @s_shl_v4i32(<4 x i32> inreg %value, <4 x i32> inreg %amount) {
+; GCN-LABEL: s_shl_v4i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s4
+; GCN-NEXT:    s_lshl_b32 s1, s1, s5
+; GCN-NEXT:    s_lshl_b32 s2, s2, s6
+; GCN-NEXT:    s_lshl_b32 s3, s3, s7
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <4 x i32> %value, %amount
+  ret <4 x i32> %result
+}
+
+define <5 x i32> @v_shl_v5i32(<5 x i32> %value, <5 x i32> %amount) {
+; GCN-LABEL: v_shl_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v5, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, v6, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, v7, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, v8, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, v9, v4
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_shl_v5i32(<5 x i32> inreg %value, <5 x i32> inreg %amount) {
+; GCN-LABEL: s_shl_v5i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s5
+; GCN-NEXT:    s_lshl_b32 s1, s1, s6
+; GCN-NEXT:    s_lshl_b32 s2, s2, s7
+; GCN-NEXT:    s_lshl_b32 s3, s3, s8
+; GCN-NEXT:    s_lshl_b32 s4, s4, s9
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <5 x i32> %value, %amount
+  ret <5 x i32> %result
+}
+
+define <16 x i32> @v_shl_v16i32(<16 x i32> %value, <16 x i32> %amount) {
+; GCN-LABEL: v_shl_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, v16, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, v17, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, v18, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, v19, v3
+; GCN-NEXT:    v_lshlrev_b32_e32 v4, v20, v4
+; GCN-NEXT:    v_lshlrev_b32_e32 v5, v21, v5
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, v22, v6
+; GCN-NEXT:    v_lshlrev_b32_e32 v7, v23, v7
+; GCN-NEXT:    v_lshlrev_b32_e32 v8, v24, v8
+; GCN-NEXT:    v_lshlrev_b32_e32 v9, v25, v9
+; GCN-NEXT:    v_lshlrev_b32_e32 v10, v26, v10
+; GCN-NEXT:    v_lshlrev_b32_e32 v11, v27, v11
+; GCN-NEXT:    v_lshlrev_b32_e32 v12, v28, v12
+; GCN-NEXT:    v_lshlrev_b32_e32 v13, v29, v13
+; GCN-NEXT:    v_lshlrev_b32_e32 v14, v30, v14
+; GCN-NEXT:    v_lshlrev_b32_e32 v15, v31, v15
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define amdgpu_ps <16 x i32> @s_shl_v16i32(<16 x i32> inreg %value, <16 x i32> inreg %amount) {
+; GCN-LABEL: s_shl_v16i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s0, s0, s16
+; GCN-NEXT:    s_lshl_b32 s1, s1, s17
+; GCN-NEXT:    s_lshl_b32 s2, s2, s18
+; GCN-NEXT:    s_lshl_b32 s3, s3, s19
+; GCN-NEXT:    s_lshl_b32 s4, s4, s20
+; GCN-NEXT:    s_lshl_b32 s5, s5, s21
+; GCN-NEXT:    s_lshl_b32 s6, s6, s22
+; GCN-NEXT:    s_lshl_b32 s7, s7, s23
+; GCN-NEXT:    s_lshl_b32 s8, s8, s24
+; GCN-NEXT:    s_lshl_b32 s9, s9, s25
+; GCN-NEXT:    s_lshl_b32 s10, s10, s26
+; GCN-NEXT:    s_lshl_b32 s11, s11, s27
+; GCN-NEXT:    s_lshl_b32 s12, s12, s28
+; GCN-NEXT:    s_lshl_b32 s13, s13, s29
+; GCN-NEXT:    s_lshl_b32 s14, s14, s30
+; GCN-NEXT:    s_lshl_b32 s15, s15, s31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <16 x i32> %value, %amount
+  ret <16 x i32> %result
+}
+
+define i16 @v_shl_i16(i16 %value, i16 %amount) {
+; GFX6-LABEL: v_shl_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v1, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b16_e32 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i16 %value, %amount
+  ret i16 %result
+}
+
+define i16 @v_shl_i16_31(i16 %value) {
+; GCN-LABEL: v_shl_i16_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i16 %value, 31
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_shl_i16(i16 inreg %value, i16 inreg %amount) {
+; GFX6-LABEL: s_shl_i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s1, s1, 0xffff
+; GFX6-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s2, 0xffff
+; GFX8-NEXT:    s_and_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, s2
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, 0xffff
+; GFX9-NEXT:    s_and_b32 s0, s0, s2
+; GFX9-NEXT:    s_and_b32 s1, s1, s2
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i16 %value, %amount
+  ret i16 %result
+}
+
+define amdgpu_ps i16 @s_shl_i16_15(i16 inreg %value) {
+; GFX6-LABEL: s_shl_i16_15:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_lshl_b32 s0, s0, 15
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_i16_15:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 15
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_i16_15:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 15
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i16 %value, 15
+  ret i16 %result
+}
+
+define amdgpu_ps half @shl_i16_sv(i16 inreg %value, i16 %amount) {
+; GFX6-LABEL: shl_i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX6-NEXT:    v_lshl_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b16_e64 v0, v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshlrev_b16_e64 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define amdgpu_ps half @shl_i16_vs(i16 %value, i16 inreg %amount) {
+; GFX6-LABEL: shl_i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, s0, v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b16_e32 v0, s0, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshlrev_b16_e32 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i16 %value, %amount
+  %cast = bitcast i16 %result to half
+  ret half %cast
+}
+
+define <2 x i16> @v_shl_v2i16(<2 x i16> %value, <2 x i16> %amount) {
+; GFX6-LABEL: v_shl_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v2, v0
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, v2, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v2, v1, v0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, v1, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i16> %value, %amount
+  ret <2 x i16> %result
+}
+
+define <2 x i16> @v_shl_v2i16_15(<2 x i16> %value) {
+; GFX6-LABEL: v_shl_v2i16_15:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 15, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 15, v1
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v2i16_15:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v2, 15
+; GFX8-NEXT:    v_lshlrev_b16_e32 v1, 15, v0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v2i16_15:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_pack_ll_b32_b16 s4, 15, 15
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, s4, v0
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i16> %value, <i16 15, i16 15>
+  ret <2 x i16> %result
+}
+
+define amdgpu_ps i32 @s_shl_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: s_shl_v2i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    s_and_b32 s2, s2, s4
+; GFX6-NEXT:    s_lshl_b32 s0, s0, s2
+; GFX6-NEXT:    s_and_b32 s2, s3, s4
+; GFX6-NEXT:    s_lshl_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s4
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX8-NEXT:    s_mov_b32 s3, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s3
+; GFX8-NEXT:    s_and_b32 s1, s1, s3
+; GFX8-NEXT:    s_and_b32 s2, s2, s3
+; GFX8-NEXT:    s_and_b32 s4, s4, s3
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX8-NEXT:    s_lshl_b32 s1, s2, s4
+; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s3
+; GFX8-NEXT:    s_or_b32 s0, s1, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s1
+; GFX9-NEXT:    s_lshl_b32 s1, s2, s3
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s1
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to i32
+  ret i32 %cast
+}
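
; A hand-written scalar-IR sketch of what the GFX9 checks above compute
; (illustrative only, not compiler output; the name is made up). The 16-bit
; shift amounts are assumed in range; the explicit mask on the low amount is
; the IR-level stand-in for the implicit amount masking done by s_lshl_b32:
define i32 @shl_v2i16_scalarized_sketch(i32 %value, i32 %amount) {
  %slo = and i32 %amount, 65535    ; low-element shift amount
  %shi = lshr i32 %amount, 16      ; high-element shift amount (s_lshr_b32)
  %vhi = lshr i32 %value, 16       ; high element of the value (s_lshr_b32)
  %lo = shl i32 %value, %slo       ; s_lshl_b32; bits spilling past bit 15 are
                                   ; masked off by the repack below
  %hi = shl i32 %vhi, %shi         ; s_lshl_b32
  ; repack as s_pack_ll_b32_b16 does: keep the low 16 bits of each source
  %lo16 = and i32 %lo, 65535
  %hipk = shl i32 %hi, 16
  %r = or i32 %hipk, %lo16
  ret i32 %r
}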
+
+define amdgpu_ps float @shl_v2i16_sv(<2 x i16> inreg %value, <2 x i16> %amount) {
+; GFX6-LABEL: shl_v2i16_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshl_b32_e32 v1, s1, v1
+; GFX6-NEXT:    v_lshl_b32_e32 v0, s0, v0
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_v2i16_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_lshlrev_b16_e64 v1, v0, s0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_v2i16_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, v0, s0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+define amdgpu_ps float @shl_v2i16_vs(<2 x i16> %value, <2 x i16> inreg %amount) {
+; GFX6-LABEL: shl_v2i16_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s2, 0xffff
+; GFX6-NEXT:    s_and_b32 s0, s0, s2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, s0, v0
+; GFX6-NEXT:    s_and_b32 s0, s1, s2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, s0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s2, v1
+; GFX6-NEXT:    v_and_b32_e32 v0, s2, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_v2i16_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v1, s0, v0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_v2i16_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, s0, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl <2 x i16> %value, %amount
+  %cast = bitcast <2 x i16> %result to float
+  ret float %cast
+}
+
+; FIXME
+; define <3 x i16> @v_shl_v3i16(<3 x i16> %value, <3 x i16> %amount) {
+;   %result = shl <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_shl_v3i16(<3 x i16> inreg %value, <3 x i16> inreg %amount) {
+;   %result = shl <3 x i16> %value, %amount
+;   ret <3 x i16> %result
+; }
+
+define <2 x float> @v_shl_v4i16(<4 x i16> %value, <4 x i16> %amount) {
+; GFX6-LABEL: v_shl_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v4, v0
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v5
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, v4, v1
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v6
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, v4, v2
+; GFX6-NEXT:    v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, v4, v3
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v4, v2, v0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v2, v3, v1
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX8-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, v2, v0
+; GFX9-NEXT:    v_pk_lshlrev_b16 v1, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_shl_v4i16(<4 x i16> inreg %value, <4 x i16> inreg %amount) {
+; GFX6-LABEL: s_shl_v4i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s8, 0xffff
+; GFX6-NEXT:    s_and_b32 s4, s4, s8
+; GFX6-NEXT:    s_lshl_b32 s0, s0, s4
+; GFX6-NEXT:    s_and_b32 s4, s5, s8
+; GFX6-NEXT:    s_lshl_b32 s1, s1, s4
+; GFX6-NEXT:    s_and_b32 s4, s6, s8
+; GFX6-NEXT:    s_and_b32 s1, s1, s8
+; GFX6-NEXT:    s_lshl_b32 s2, s2, s4
+; GFX6-NEXT:    s_and_b32 s4, s7, s8
+; GFX6-NEXT:    s_lshl_b32 s3, s3, s4
+; GFX6-NEXT:    s_and_b32 s0, s0, s8
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s8
+; GFX6-NEXT:    s_and_b32 s2, s3, s8
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX8-NEXT:    s_mov_b32 s6, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s8, s3, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s6
+; GFX8-NEXT:    s_and_b32 s2, s2, s6
+; GFX8-NEXT:    s_and_b32 s4, s4, s6
+; GFX8-NEXT:    s_and_b32 s7, s7, s6
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s2
+; GFX8-NEXT:    s_lshl_b32 s2, s4, s7
+; GFX8-NEXT:    s_and_b32 s1, s1, s6
+; GFX8-NEXT:    s_and_b32 s3, s3, s6
+; GFX8-NEXT:    s_and_b32 s5, s5, s6
+; GFX8-NEXT:    s_and_b32 s8, s8, s6
+; GFX8-NEXT:    s_lshl_b32 s1, s1, s3
+; GFX8-NEXT:    s_lshl_b32 s3, s5, s8
+; GFX8-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s6
+; GFX8-NEXT:    s_or_b32 s0, s2, s0
+; GFX8-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s6
+; GFX8-NEXT:    s_or_b32 s1, s2, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s2, 16
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s2
+; GFX9-NEXT:    s_lshl_b32 s2, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_lshl_b32 s1, s1, s3
+; GFX9-NEXT:    s_lshl_b32 s2, s2, s4
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl <4 x i16> %value, %amount
+  %cast = bitcast <4 x i16> %result to <2 x i32>
+  ret <2 x i32> %cast
+}
+
+; FIXME
+; define <5 x i16> @v_shl_v5i16(<5 x i16> %value, <5 x i16> %amount) {
+;   %result = shl <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_shl_v5i16(<5 x i16> inreg %value, <5 x i16> inreg %amount) {
+;   %result = shl <5 x i16> %value, %amount
+;   ret <5 x i16> %result
+; }
+
+; define <3 x float> @v_shl_v6i16(<6 x i16> %value, <6 x i16> %amount) {
+;   %result = shl <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x float>
+;   ret <3 x float> %cast
+; }
+
+; define amdgpu_ps <3 x i32> @s_shl_v6i16(<6 x i16> inreg %value, <6 x i16> inreg %amount) {
+;   %result = shl <6 x i16> %value, %amount
+;   %cast = bitcast <6 x i16> %result to <3 x i32>
+;   ret <3 x i32> %cast
+; }
+
+define <4 x float> @v_shl_v8i16(<8 x i16> %value, <8 x i16> %amount) {
+; GFX6-LABEL: v_shl_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    s_mov_b32 s4, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v8
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, v8, v0
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v9
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, v8, v1
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v10
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, v8, v2
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v11
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, v8, v3
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v12
+; GFX6-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, v8, v4
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v13
+; GFX6-NEXT:    v_lshlrev_b32_e32 v5, v8, v5
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v14
+; GFX6-NEXT:    v_mov_b32_e32 v16, 0xffff
+; GFX6-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v6, v8, v6
+; GFX6-NEXT:    v_and_b32_e32 v8, s4, v15
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT:    v_and_b32_e32 v1, v2, v16
+; GFX6-NEXT:    v_and_b32_e32 v2, v3, v16
+; GFX6-NEXT:    v_and_b32_e32 v3, v5, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v7, v8, v7
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_and_b32_e32 v2, v4, v16
+; GFX6-NEXT:    v_and_b32_e32 v4, v7, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT:    v_and_b32_e32 v3, v6, v16
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b16_e32 v8, v4, v0
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v4, v5, v1
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v1, v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v1, v4, v1
+; GFX8-NEXT:    v_lshlrev_b16_e32 v4, v6, v2
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT:    v_lshlrev_b16_e32 v4, v7, v3
+; GFX8-NEXT:    v_lshlrev_b16_sdwa v3, v7, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v8, v0
+; GFX8-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_lshlrev_b16 v0, v4, v0
+; GFX9-NEXT:    v_pk_lshlrev_b16 v1, v5, v1
+; GFX9-NEXT:    v_pk_lshlrev_b16 v2, v6, v2
+; GFX9-NEXT:    v_pk_lshlrev_b16 v3, v7, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x float>
+  ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_shl_v8i16(<8 x i16> inreg %value, <8 x i16> inreg %amount) {
+; GFX6-LABEL: s_shl_v8i16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_mov_b32 s16, 0xffff
+; GFX6-NEXT:    s_and_b32 s8, s8, s16
+; GFX6-NEXT:    s_lshl_b32 s0, s0, s8
+; GFX6-NEXT:    s_and_b32 s8, s9, s16
+; GFX6-NEXT:    s_lshl_b32 s1, s1, s8
+; GFX6-NEXT:    s_and_b32 s8, s10, s16
+; GFX6-NEXT:    s_lshl_b32 s2, s2, s8
+; GFX6-NEXT:    s_and_b32 s8, s11, s16
+; GFX6-NEXT:    s_lshl_b32 s3, s3, s8
+; GFX6-NEXT:    s_and_b32 s8, s12, s16
+; GFX6-NEXT:    s_and_b32 s1, s1, s16
+; GFX6-NEXT:    s_lshl_b32 s4, s4, s8
+; GFX6-NEXT:    s_and_b32 s8, s13, s16
+; GFX6-NEXT:    s_lshl_b32 s5, s5, s8
+; GFX6-NEXT:    s_and_b32 s8, s14, s16
+; GFX6-NEXT:    s_and_b32 s0, s0, s16
+; GFX6-NEXT:    s_lshl_b32 s1, s1, 16
+; GFX6-NEXT:    s_lshl_b32 s6, s6, s8
+; GFX6-NEXT:    s_and_b32 s8, s15, s16
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_and_b32 s1, s2, s16
+; GFX6-NEXT:    s_and_b32 s2, s3, s16
+; GFX6-NEXT:    s_and_b32 s3, s5, s16
+; GFX6-NEXT:    s_lshl_b32 s2, s2, 16
+; GFX6-NEXT:    s_lshl_b32 s7, s7, s8
+; GFX6-NEXT:    s_or_b32 s1, s1, s2
+; GFX6-NEXT:    s_and_b32 s2, s4, s16
+; GFX6-NEXT:    s_and_b32 s4, s7, s16
+; GFX6-NEXT:    s_lshl_b32 s3, s3, 16
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_and_b32 s3, s6, s16
+; GFX6-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX6-NEXT:    s_or_b32 s3, s3, s4
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: s_shl_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX8-NEXT:    s_mov_b32 s12, 0xffff
+; GFX8-NEXT:    s_lshr_b32 s13, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s9, s1, 16
+; GFX8-NEXT:    s_lshr_b32 s14, s5, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s12
+; GFX8-NEXT:    s_and_b32 s4, s4, s12
+; GFX8-NEXT:    s_and_b32 s8, s8, s12
+; GFX8-NEXT:    s_and_b32 s13, s13, s12
+; GFX8-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s15, s6, 16
+; GFX8-NEXT:    s_lshl_b32 s0, s0, s4
+; GFX8-NEXT:    s_lshl_b32 s4, s8, s13
+; GFX8-NEXT:    s_and_b32 s1, s1, s12
+; GFX8-NEXT:    s_and_b32 s5, s5, s12
+; GFX8-NEXT:    s_and_b32 s9, s9, s12
+; GFX8-NEXT:    s_and_b32 s14, s14, s12
+; GFX8-NEXT:    s_lshr_b32 s11, s3, 16
+; GFX8-NEXT:    s_lshr_b32 s16, s7, 16
+; GFX8-NEXT:    s_lshl_b32 s1, s1, s5
+; GFX8-NEXT:    s_and_b32 s2, s2, s12
+; GFX8-NEXT:    s_and_b32 s6, s6, s12
+; GFX8-NEXT:    s_and_b32 s10, s10, s12
+; GFX8-NEXT:    s_and_b32 s15, s15, s12
+; GFX8-NEXT:    s_lshl_b32 s5, s9, s14
+; GFX8-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX8-NEXT:    s_and_b32 s0, s0, s12
+; GFX8-NEXT:    s_lshl_b32 s2, s2, s6
+; GFX8-NEXT:    s_or_b32 s0, s4, s0
+; GFX8-NEXT:    s_and_b32 s3, s3, s12
+; GFX8-NEXT:    s_and_b32 s7, s7, s12
+; GFX8-NEXT:    s_and_b32 s11, s11, s12
+; GFX8-NEXT:    s_and_b32 s16, s16, s12
+; GFX8-NEXT:    s_lshl_b32 s6, s10, s15
+; GFX8-NEXT:    s_lshl_b32 s4, s5, 16
+; GFX8-NEXT:    s_and_b32 s1, s1, s12
+; GFX8-NEXT:    s_lshl_b32 s3, s3, s7
+; GFX8-NEXT:    s_or_b32 s1, s4, s1
+; GFX8-NEXT:    s_lshl_b32 s7, s11, s16
+; GFX8-NEXT:    s_lshl_b32 s4, s6, 16
+; GFX8-NEXT:    s_and_b32 s2, s2, s12
+; GFX8-NEXT:    s_or_b32 s2, s4, s2
+; GFX8-NEXT:    s_lshl_b32 s4, s7, 16
+; GFX8-NEXT:    s_and_b32 s3, s3, s12
+; GFX8-NEXT:    s_or_b32 s3, s4, s3
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: s_shl_v8i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX9-NEXT:    s_lshr_b32 s9, s4, 16
+; GFX9-NEXT:    s_lshl_b32 s0, s0, s4
+; GFX9-NEXT:    s_lshl_b32 s4, s8, s9
+; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX9-NEXT:    s_lshr_b32 s8, s5, 16
+; GFX9-NEXT:    s_lshl_b32 s1, s1, s5
+; GFX9-NEXT:    s_lshl_b32 s4, s4, s8
+; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s6, 16
+; GFX9-NEXT:    s_lshl_b32 s4, s4, s5
+; GFX9-NEXT:    s_lshl_b32 s2, s2, s6
+; GFX9-NEXT:    s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s7, 16
+; GFX9-NEXT:    s_lshl_b32 s3, s3, s7
+; GFX9-NEXT:    s_lshl_b32 s4, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl <8 x i16> %value, %amount
+  %cast = bitcast <8 x i16> %result to <4 x i32>
+  ret <4 x i32> %cast
+}
+
+define i64 @v_shl_i64(i64 %value, i64 %amount) {
+; GFX6-LABEL: v_shl_i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v2
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v2, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i64 %value, %amount
+  ret i64 %result
+}
+
+define i64 @v_shl_i64_63(i64 %value) {
+; GCN-LABEL: v_shl_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 31, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i64 %value, 63
+  ret i64 %result
+}
+
+define i64 @v_shl_i64_33(i64 %value) {
+; GCN-LABEL: v_shl_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i64 %value, 33
+  ret i64 %result
+}
+
+define i64 @v_shl_i64_32(i64 %value) {
+; GCN-LABEL: v_shl_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i64 %value, 32
+  ret i64 %result
+}
+
+define i64 @v_shl_i64_31(i64 %value) {
+; GFX6-LABEL: v_shl_i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_shl_i64(i64 inreg %value, i64 inreg %amount) {
+; GCN-LABEL: s_shl_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b64 s[0:1], s[0:1], s2
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, %amount
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_shl_i64_63(i64 inreg %value) {
+; GCN-LABEL: s_shl_i64_63:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s1, s0, 31
+; GCN-NEXT:    s_mov_b32 s0, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, 63
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_shl_i64_33(i64 inreg %value) {
+; GCN-LABEL: s_shl_i64_33:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b32 s1, s0, 1
+; GCN-NEXT:    s_mov_b32 s0, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, 33
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_shl_i64_32(i64 inreg %value) {
+; GCN-LABEL: s_shl_i64_32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b32 s1, s0
+; GCN-NEXT:    s_mov_b32 s0, 0
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, 32
+  ret i64 %result
+}
+
+define amdgpu_ps i64 @s_shl_i64_31(i64 inreg %value) {
+; GCN-LABEL: s_shl_i64_31:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b64 s[0:1], s[0:1], 31
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, 31
+  ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @shl_i64_sv(i64 inreg %value, i64 %amount) {
+; GFX6-LABEL: shl_i64_sv:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshl_b64 v[0:1], s[0:1], v0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_i64_sv:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v0, s[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_i64_sv:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v0, s[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @shl_i64_vs(i64 %value, i64 inreg %amount) {
+; GFX6-LABEL: shl_i64_vs:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], s0
+; GFX6-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: shl_i64_vs:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], s0, v[0:1]
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: shl_i64_vs:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], s0, v[0:1]
+; GFX9-NEXT:    ; return to shader part epilog
+  %result = shl i64 %value, %amount
+  %cast = bitcast i64 %result to <2 x float>
+  ret <2 x float> %cast
+}
+
+define <2 x i64> @v_shl_v2i64(<2 x i64> %value, <2 x i64> %amount) {
+; GFX6-LABEL: v_shl_v2i64:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], v4
+; GFX6-NEXT:    v_lshl_b64 v[2:3], v[2:3], v6
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v2i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; GFX8-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v2i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
+; GFX9-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}
+
+define <2 x i64> @v_shl_v2i64_31(<2 x i64> %value) {
+; GFX6-LABEL: v_shl_v2i64_31:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_lshl_b64 v[0:1], v[0:1], 31
+; GFX6-NEXT:    v_lshl_b64 v[2:3], v[2:3], 31
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_shl_v2i64_31:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
+; GFX8-NEXT:    v_lshlrev_b64 v[2:3], 31, v[2:3]
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_shl_v2i64_31:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_lshlrev_b64 v[0:1], 31, v[0:1]
+; GFX9-NEXT:    v_lshlrev_b64 v[2:3], 31, v[2:3]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %result = shl <2 x i64> %value, <i64 31, i64 31>
+  ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_shl_v2i64(<2 x i64> inreg %value, <2 x i64> inreg %amount) {
+; GCN-LABEL: s_shl_v2i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_lshl_b64 s[0:1], s[0:1], s4
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
+; GCN-NEXT:    ; return to shader part epilog
+  %result = shl <2 x i64> %value, %amount
+  ret <2 x i64> %result
+}
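
The constant i64 cases above illustrate the split into 32-bit halves: for a shift amount of 32 or more, only the high word of the result is nonzero, so a shift by 33 collapses to a single 32-bit shift by 1 plus a zeroed low word (v_lshlrev_b32 / s_lshl_b32 in the checks). A hand-derived IR equivalent of that rewrite (an illustrative sketch, not something the compiler materializes as IR):

define i64 @shl_i64_33_sketch(i64 %value) {
  %lo = trunc i64 %value to i32   ; only the low word can reach the result
  %hi = shl i32 %lo, 1            ; shift by 33 - 32 = 1
  %ext = zext i32 %hi to i64
  %result = shl i64 %ext, 32      ; place it in the high word; low word is 0
  ret i64 %result
}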